// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
*
* Copyright (C) 2018 Marvell.
*
*/
#include <linux/module.h>
#include <linux/pci.h>
#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
#include "lmac_common.h"
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
int type, bool add);
static int nix_setup_ipolicers(struct rvu *rvu,
struct nix_hw *nix_hw, int blkaddr);
static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
struct nix_hw *nix_hw, u16 pcifunc);
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
u32 leaf_prof);
static const char *nix_get_ctx_name(int ctype);
enum mc_tbl_sz {
MC_TBL_SZ_256,
MC_TBL_SZ_512,
MC_TBL_SZ_1K,
MC_TBL_SZ_2K,
MC_TBL_SZ_4K,
MC_TBL_SZ_8K,
MC_TBL_SZ_16K,
MC_TBL_SZ_32K,
MC_TBL_SZ_64K,
};
enum mc_buf_cnt {
MC_BUF_CNT_8,
MC_BUF_CNT_16,
MC_BUF_CNT_32,
MC_BUF_CNT_64,
MC_BUF_CNT_128,
MC_BUF_CNT_256,
MC_BUF_CNT_512,
MC_BUF_CNT_1024,
MC_BUF_CNT_2048,
};
enum nix_makr_fmt_indexes {
NIX_MARK_CFG_IP_DSCP_RED,
NIX_MARK_CFG_IP_DSCP_YELLOW,
NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
NIX_MARK_CFG_IP_ECN_RED,
NIX_MARK_CFG_IP_ECN_YELLOW,
NIX_MARK_CFG_IP_ECN_YELLOW_RED,
NIX_MARK_CFG_VLAN_DEI_RED,
NIX_MARK_CFG_VLAN_DEI_YELLOW,
NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
NIX_MARK_CFG_MAX,
};
/* For now considering MC resources needed for broadcast
* pkt replication only. i.e 256 HWVFs + 12 PFs.
*/
#define MC_TBL_SIZE MC_TBL_SZ_512
#define MC_BUF_CNT MC_BUF_CNT_128
struct mce {
struct hlist_node node;
u16 pcifunc;
};
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
int i = 0;
	/* If blkaddr is 0, return the first NIX block address */
if (blkaddr == 0)
return rvu->nix_blkaddr[blkaddr];
while (i + 1 < MAX_NIX_BLKS) {
if (rvu->nix_blkaddr[i] == blkaddr)
return rvu->nix_blkaddr[i + 1];
i++;
}
return 0;
}
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
return false;
return true;
}
int rvu_get_nixlf_count(struct rvu *rvu)
{
int blkaddr = 0, max = 0;
struct rvu_block *block;
blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
while (blkaddr) {
block = &rvu->hw->block[blkaddr];
max += block->lf.max;
blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
}
return max;
}
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_hwinfo *hw = rvu->hw;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (*nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
if (nix_blkaddr)
*nix_blkaddr = blkaddr;
return 0;
}
int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
struct nix_hw **nix_hw, int *blkaddr)
{
struct rvu_pfvf *pfvf;
pfvf = rvu_get_pfvf(rvu, pcifunc);
*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || *blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
if (!*nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
return 0;
}
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
INIT_HLIST_HEAD(&list->head);
list->count = 0;
list->max = max;
}
static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
int idx;
if (!mcast)
return 0;
idx = mcast->next_free_mce;
mcast->next_free_mce += count;
return idx;
}
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
int nix_blkaddr = 0, i = 0;
struct rvu *rvu = hw->rvu;
nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
while (nix_blkaddr) {
if (blkaddr == nix_blkaddr && hw->nix)
return &hw->nix[i];
nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
i++;
}
return NULL;
}
u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
dwrr_mtu &= 0x1FULL;
/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
* Value of 4 is reserved for MTU value of 9728 bytes.
* Value of 5 is reserved for MTU value of 10240 bytes.
*/
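	/* For example, an encoded value of 7 maps to 2^7 = 128 bytes and 14
	 * maps to 16K bytes; only the reserved values 4 and 5 map to the
	 * non power-of-2 MTUs listed above.
	 */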
switch (dwrr_mtu) {
case 4:
return 9728;
case 5:
return 10240;
default:
return BIT_ULL(dwrr_mtu);
}
return 0;
}
u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
* Value of 4 is reserved for MTU value of 9728 bytes.
* Value of 5 is reserved for MTU value of 10240 bytes.
*/
if (bytes > BIT_ULL(16))
return 0;
switch (bytes) {
case 9728:
return 4;
case 10240:
return 5;
default:
return ilog2(bytes);
}
return 0;
}
static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
int err;
/* Sync all in flight RX packets to LLC/DRAM */
rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
if (err)
dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
	/* SW_SYNC ensures all existing transactions are finished and pkts
	 * are written to LLC/DRAM; queues should be torn down only after
	 * a successful SW_SYNC. Due to a HW errata, in some rare scenarios
	 * an existing transaction might end after the SW_SYNC operation. To
	 * ensure the operation is fully done, do the SW_SYNC twice.
	 */
rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
if (err)
dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}
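/* Check whether transmit scheduler queue 'schq' at level 'lvl' is within
 * range and is owned by the given PF/VF. For levels at or above the traffic
 * aggregation level the queue only needs to belong to the same PF.
 */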
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
int lvl, u16 pcifunc, u16 schq)
{
struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u16 map_func;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return false;
txsch = &nix_hw->txsch[lvl];
/* Check out of bounds */
if (schq >= txsch->schq.max)
return false;
mutex_lock(&rvu->rsrc_lock);
map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
mutex_unlock(&rvu->rsrc_lock);
	/* TLs aggregating traffic are shared across a PF and its VFs */
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
return false;
else
return true;
}
if (map_func != pcifunc)
return false;
return true;
}
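/* Per-interface (CGX/LBK/SDP) init for a NIX LF: sets up the Rx/Tx channel
 * base and count for the PF/VF, installs the unicast/broadcast (and for
 * LBK/SDP, promisc) NPC MCAM entries and adds the PF_FUNC to the broadcast
 * packet replication list.
 */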
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
struct nix_lf_alloc_rsp *rsp, bool loop)
{
struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
u16 req_chan_base, req_chan_end, req_chan_cnt;
struct rvu_hwinfo *hw = rvu->hw;
struct sdp_node_info *sdp_info;
int pkind, pf, vf, lbkid, vfid;
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
bool from_vf;
int err;
pf = rvu_get_pf(pcifunc);
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
type != NIX_INTF_TYPE_SDP)
return 0;
switch (type) {
case NIX_INTF_TYPE_CGX:
pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
pkind = rvu_npc_get_pkind(rvu, pf);
if (pkind < 0) {
dev_err(rvu->dev,
"PF_Func 0x%x: Invalid pkind\n", pcifunc);
return -EINVAL;
}
pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
pfvf->tx_chan_base = pfvf->rx_chan_base;
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
/* By default we enable pause frames */
if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
rvu),
lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
		/* If the NIX1 block is present on the silicon then NIX blocks
		 * are assigned alternately to lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
lbkid = 0;
if (rvu->hw->lbk_links > 1)
lbkid = vf & 0x1 ? 0 : 1;
		/* By default NIX0 is configured to send packets on lbk link 1
		 * (which corresponds to LBK1); the same packet is received on
		 * NIX1 over lbk link 0. If NIX1 sends a packet on lbk link 0
		 * (which corresponds to LBK2) the packet is received on NIX0
		 * lbk link 1.
		 * But if the lbk links for NIX0 and NIX1 are negated, i.e NIX0
		 * transmits and receives on lbk link 0, which corresponds
		 * to the LBK1 block, back to back connectivity between NIX and
		 * LBK can be achieved (which is similar to 96xx)
		 *
		 *			RX		TX
		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
		 */
if (loop)
lbkid = !lbkid;
		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore, if an odd number of AF VFs is
		 * enabled, the last VF remains without a pair.
		 */
pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
pfvf->tx_chan_base = vf & 0x1 ?
rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
rsp->tx_link = hw->cgx_links + lbkid;
pfvf->lbkid = lbkid;
rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
break;
case NIX_INTF_TYPE_SDP:
from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
sdp_info = parent_pf->sdp_info;
if (!sdp_info) {
dev_err(rvu->dev, "Invalid sdp_info pointer\n");
return -EINVAL;
}
if (from_vf) {
req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
sdp_info->num_pf_rings;
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
for (vfid = 0; vfid < vf; vfid++)
req_chan_base += sdp_info->vf_rings[vfid];
req_chan_cnt = sdp_info->vf_rings[vf];
req_chan_end = req_chan_base + req_chan_cnt - 1;
if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
dev_err(rvu->dev,
"PF_Func 0x%x: Invalid channel base and count\n",
pcifunc);
return -EINVAL;
}
} else {
req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
req_chan_cnt = sdp_info->num_pf_rings;
}
pfvf->rx_chan_base = req_chan_base;
pfvf->rx_chan_cnt = req_chan_cnt;
pfvf->tx_chan_base = pfvf->rx_chan_base;
pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
rsp->tx_link = hw->cgx_links + hw->lbk_links;
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
break;
}
	/* Add a UCAST forwarding rule in MCAM matching the MAC address of
	 * the RVU PF/VF to which this NIXLF is attached.
	 */
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, pfvf->mac_addr);
/* Add this PF_FUNC to bcast pkt replication list */
err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
if (err) {
dev_err(rvu->dev,
"Bcast list, failed to enable PF_FUNC 0x%x\n",
pcifunc);
return err;
}
/* Install MCAM rule matching Ethernet broadcast mac address */
rvu_npc_install_bcast_match_entry(rvu, pcifunc,
nixlf, pfvf->rx_chan_base);
pfvf->maxlen = NIC_HW_MIN_FRS;
pfvf->minlen = NIC_HW_MIN_FRS;
return 0;
}
static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int err;
pfvf->maxlen = 0;
pfvf->minlen = 0;
/* Remove this PF_FUNC from bcast pkt replication list */
err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
if (err) {
dev_err(rvu->dev,
"Bcast list, failed to disable PF_FUNC 0x%x\n",
pcifunc);
}
/* Free and disable any MCAM entries used by this NIX LF */
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
/* Disable DMAC filters used */
rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct nix_bp_cfg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
int blkaddr, pf, type;
u16 chan_base, chan;
u64 cfg;
pf = rvu_get_pf(pcifunc);
type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
cfg & ~BIT_ULL(16));
}
return 0;
}
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
lmac_chan_cnt = cfg & 0xFF;
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
sdp_chan_cnt = cfg & 0xFFF;
cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * LMAC channels and BPIDs are mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
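	/* For example, with 16 channels per LMAC and 4 LMACs per CGX (as in
	 * the map above), cgx(1)_lmac(2)_chan(3) gets
	 * bpid = (1 * 4 * 16) + (2 * 16) + 3 = 99, matching the computation
	 * in the NIX_INTF_TYPE_CGX case below.
	 */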
switch (type) {
case NIX_INTF_TYPE_CGX:
if ((req->chan_base + req->chan_cnt) > 15)
return -EINVAL;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
(lmac_id * lmac_chan_cnt) + req->chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
if (bpid > cgx_bpid_cnt)
return -EINVAL;
break;
case NIX_INTF_TYPE_LBK:
if ((req->chan_base + req->chan_cnt) > 63)
return -EINVAL;
bpid = cgx_bpid_cnt + req->chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
return -EINVAL;
break;
case NIX_INTF_TYPE_SDP:
if ((req->chan_base + req->chan_cnt) > 255)
return -EINVAL;
bpid = sdp_bpid_cnt + req->chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
return -EINVAL;
break;
default:
return -EINVAL;
}
return bpid;
}
int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
struct nix_bp_cfg_req *req,
struct nix_bp_cfg_rsp *rsp)
{
int blkaddr, pf, type, chan_id = 0;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
u16 chan_base, chan;
s16 bpid, bpid_base;
u64 cfg;
pf = rvu_get_pf(pcifunc);
type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
type = NIX_INTF_TYPE_SDP;
/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
type != NIX_INTF_TYPE_SDP)
return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
chan_base = pfvf->rx_chan_base + req->chan_base;
bpid = bpid_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
if (bpid < 0) {
dev_warn(rvu->dev, "Fail to enable backpressure\n");
return -EINVAL;
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
cfg &= ~GENMASK_ULL(8, 0);
rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
chan_id++;
bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
}
for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel and the bpid assigned to it */
rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
(bpid_base & 0x3FF);
if (req->bpid_per_chan)
bpid_base++;
}
rsp->chan_cnt = req->chan_cnt;
return 0;
}
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
struct nix_lso_format field = {0};
/* IP's Length field */
field.layer = NIX_TXLAYER_OL3;
/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
field.offset = v4 ? 2 : 4;
field.sizem1 = 1; /* i.e 2 bytes */
field.alg = NIX_LSOALG_ADD_PAYLEN;
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
*(u64 *)&field);
/* No ID field in IPv6 header */
if (!v4)
return;
/* IP's ID field */
field.layer = NIX_TXLAYER_OL3;
field.offset = 4;
field.sizem1 = 1; /* i.e 2 bytes */
field.alg = NIX_LSOALG_ADD_SEGNUM;
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
*(u64 *)&field);
}
static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
u64 format, u64 *fidx)
{
struct nix_lso_format field = {0};
/* TCP's sequence number field */
field.layer = NIX_TXLAYER_OL4;
field.offset = 4;
field.sizem1 = 3; /* i.e 4 bytes */
field.alg = NIX_LSOALG_ADD_OFFSET;
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
*(u64 *)&field);
/* TCP's flags field */
field.layer = NIX_TXLAYER_OL4;
field.offset = 12;
field.sizem1 = 1; /* 2 bytes */
field.alg = NIX_LSOALG_TCP_FLAGS;
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
*(u64 *)&field);
}
static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
u64 cfg, idx, fidx = 0;
/* Get max HW supported format indices */
cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
nix_hw->lso.total = cfg;
/* Enable LSO */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
/* For TSO, set first and middle segment flags to
* mask out PSH, RST & FIN flags in TCP packet
*/
cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
/* Setup default static LSO formats
*
* Configure format fields for TCPv4 segmentation offload
*/
idx = NIX_LSO_FORMAT_IDX_TSOV4;
nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
/* Set rest of the fields to NOP */
for (; fidx < 8; fidx++) {
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
}
nix_hw->lso.in_use++;
/* Configure format fields for TCPv6 segmentation offload */
idx = NIX_LSO_FORMAT_IDX_TSOV6;
fidx = 0;
nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
/* Set rest of the fields to NOP */
for (; fidx < 8; fidx++) {
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
}
nix_hw->lso.in_use++;
}
static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
kfree(pfvf->rq_bmap);
kfree(pfvf->sq_bmap);
kfree(pfvf->cq_bmap);
if (pfvf->rq_ctx)
qmem_free(rvu->dev, pfvf->rq_ctx);
if (pfvf->sq_ctx)
qmem_free(rvu->dev, pfvf->sq_ctx);
if (pfvf->cq_ctx)
qmem_free(rvu->dev, pfvf->cq_ctx);
if (pfvf->rss_ctx)
qmem_free(rvu->dev, pfvf->rss_ctx);
if (pfvf->nix_qints_ctx)
qmem_free(rvu->dev, pfvf->nix_qints_ctx);
if (pfvf->cq_ints_ctx)
qmem_free(rvu->dev, pfvf->cq_ints_ctx);
pfvf->rq_bmap = NULL;
pfvf->cq_bmap = NULL;
pfvf->sq_bmap = NULL;
pfvf->rq_ctx = NULL;
pfvf->sq_ctx = NULL;
pfvf->cq_ctx = NULL;
pfvf->rss_ctx = NULL;
pfvf->nix_qints_ctx = NULL;
pfvf->cq_ints_ctx = NULL;
}
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
struct rvu_pfvf *pfvf, int nixlf,
int rss_sz, int rss_grps, int hwctx_size,
u64 way_mask, bool tag_lsb_as_adder)
{
int err, grp, num_indices;
u64 val;
/* RSS is not requested for this NIXLF */
if (!rss_sz)
return 0;
num_indices = rss_sz * rss_grps;
/* Alloc NIX RSS HW context memory and config the base */
err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
if (err)
return err;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
(u64)pfvf->rss_ctx->iova);
/* Config full RSS table size, enable RSS and caching */
val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
if (tag_lsb_as_adder)
val |= BIT_ULL(5);
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
/* Config RSS group offset and sizes */
for (grp = 0; grp < rss_grps; grp++)
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
return 0;
}
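/* Copy the instruction to the current AQ head, ring the doorbell and
 * busy-wait (up to ~1000us) for the result; returns -EBUSY on timeout or
 * if the completion code is not NIX_AQ_COMP_GOOD.
 */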
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
struct nix_aq_inst_s *inst)
{
struct admin_queue *aq = block->aq;
struct nix_aq_res_s *result;
int timeout = 1000;
u64 reg, head;
result = (struct nix_aq_res_s *)aq->res->base;
/* Get current head pointer where to append this instruction */
reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
head = (reg >> 4) & AQ_PTR_MASK;
memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
(void *)inst, aq->inst->entry_sz);
memset(result, 0, sizeof(*result));
/* sync into memory */
wmb();
/* Ring the doorbell and wait for result */
rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
while (result->compcode == NIX_AQ_COMP_NOTDONE) {
cpu_relax();
udelay(1);
timeout--;
if (!timeout)
return -EBUSY;
}
if (result->compcode != NIX_AQ_COMP_GOOD)
/* TODO: Replace this with some error code */
return -EBUSY;
return 0;
}
static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int nixlf, blkaddr, rc = 0;
struct nix_aq_inst_s inst;
struct rvu_block *block;
struct admin_queue *aq;
struct rvu_pfvf *pfvf;
void *ctx, *mask;
bool ena;
u64 cfg;
blkaddr = nix_hw->blkaddr;
block = &hw->block[blkaddr];
aq = block->aq;
if (!aq) {
dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
return NIX_AF_ERR_AQ_ENQUEUE;
}
pfvf = rvu_get_pfvf(rvu, pcifunc);
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
* operations done by AF itself.
*/
if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
(req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
if (!pfvf->nixlf || nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
}
switch (req->ctype) {
case NIX_AQ_CTYPE_RQ:
/* Check if index exceeds max no of queues */
if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
case NIX_AQ_CTYPE_SQ:
if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
case NIX_AQ_CTYPE_CQ:
if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
case NIX_AQ_CTYPE_RSS:
/* Check if RSS is enabled and qidx is within range */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
(req->qidx >= (256UL << (cfg & 0xF))))
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
case NIX_AQ_CTYPE_MCE:
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
/* Check if index exceeds MCE list length */
if (!nix_hw->mcast.mce_ctx ||
(req->qidx >= (256UL << (cfg & 0xF))))
rc = NIX_AF_ERR_AQ_ENQUEUE;
/* Adding multicast lists for requests from PF/VFs is not
* yet supported, so ignore this.
*/
if (rsp)
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
case NIX_AQ_CTYPE_BANDPROF:
if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
nix_hw, pcifunc))
rc = NIX_AF_ERR_INVALID_BANDPROF;
break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
}
if (rc)
return rc;
	/* Check if the SMQ pointed to by the SQ belongs to this PF/VF or not */
if (req->ctype == NIX_AQ_CTYPE_SQ &&
((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
(req->op == NIX_AQ_INSTOP_WRITE &&
req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
pcifunc, req->sq.smq))
return NIX_AF_ERR_AQ_ENQUEUE;
}
memset(&inst, 0, sizeof(struct nix_aq_inst_s));
inst.lf = nixlf;
inst.cindex = req->qidx;
inst.ctype = req->ctype;
inst.op = req->op;
/* Currently we are not supporting enqueuing multiple instructions,
* so always choose first entry in result memory.
*/
inst.res_addr = (u64)aq->res->iova;
	/* Hardware uses the same aq->res->base for updating the result of
	 * the previous instruction, hence wait here till it is done.
	 */
spin_lock(&aq->lock);
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz);
/* Context needs to be written at RES_ADDR + 128 */
ctx = aq->res->base + 128;
/* Mask needs to be written at RES_ADDR + 256 */
mask = aq->res->base + 256;
switch (req->op) {
case NIX_AQ_INSTOP_WRITE:
if (req->ctype == NIX_AQ_CTYPE_RQ)
memcpy(mask, &req->rq_mask,
sizeof(struct nix_rq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_SQ)
memcpy(mask, &req->sq_mask,
sizeof(struct nix_sq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_CQ)
memcpy(mask, &req->cq_mask,
sizeof(struct nix_cq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_RSS)
memcpy(mask, &req->rss_mask,
sizeof(struct nix_rsse_s));
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(mask, &req->mce_mask,
sizeof(struct nix_rx_mce_s));
else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
memcpy(mask, &req->prof_mask,
sizeof(struct nix_bandprof_s));
fallthrough;
case NIX_AQ_INSTOP_INIT:
if (req->ctype == NIX_AQ_CTYPE_RQ)
memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_SQ)
memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_CQ)
memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_RSS)
memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
break;
case NIX_AQ_INSTOP_NOP:
case NIX_AQ_INSTOP_READ:
case NIX_AQ_INSTOP_LOCK:
case NIX_AQ_INSTOP_UNLOCK:
break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
spin_unlock(&aq->lock);
return rc;
}
/* Submit the instruction to AQ */
rc = nix_aq_enqueue_wait(rvu, block, &inst);
if (rc) {
spin_unlock(&aq->lock);
return rc;
}
/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
if (req->op == NIX_AQ_INSTOP_INIT) {
if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
__set_bit(req->qidx, pfvf->rq_bmap);
if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
__set_bit(req->qidx, pfvf->sq_bmap);
if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
__set_bit(req->qidx, pfvf->cq_bmap);
}
if (req->op == NIX_AQ_INSTOP_WRITE) {
if (req->ctype == NIX_AQ_CTYPE_RQ) {
ena = (req->rq.ena & req->rq_mask.ena) |
(test_bit(req->qidx, pfvf->rq_bmap) &
~req->rq_mask.ena);
if (ena)
__set_bit(req->qidx, pfvf->rq_bmap);
else
__clear_bit(req->qidx, pfvf->rq_bmap);
}
if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
(test_bit(req->qidx, pfvf->sq_bmap) &
~req->sq_mask.ena);
if (ena)
__set_bit(req->qidx, pfvf->sq_bmap);
else
__clear_bit(req->qidx, pfvf->sq_bmap);
}
if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
(test_bit(req->qidx, pfvf->cq_bmap) &
~req->cq_mask.ena);
if (ena)
__set_bit(req->qidx, pfvf->cq_bmap);
else
__clear_bit(req->qidx, pfvf->cq_bmap);
}
}
if (rsp) {
/* Copy read context into mailbox */
if (req->op == NIX_AQ_INSTOP_READ) {
if (req->ctype == NIX_AQ_CTYPE_RQ)
memcpy(&rsp->rq, ctx,
sizeof(struct nix_rq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_SQ)
memcpy(&rsp->sq, ctx,
sizeof(struct nix_sq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_CQ)
memcpy(&rsp->cq, ctx,
sizeof(struct nix_cq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_RSS)
memcpy(&rsp->rss, ctx,
sizeof(struct nix_rsse_s));
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(&rsp->mce, ctx,
sizeof(struct nix_rx_mce_s));
else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
memcpy(&rsp->prof, ctx,
sizeof(struct nix_bandprof_s));
}
}
spin_unlock(&aq->lock);
return 0;
}
static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
struct nix_aq_enq_req *req, u8 ctype)
{
struct nix_cn10k_aq_enq_req aq_req;
struct nix_cn10k_aq_enq_rsp aq_rsp;
int rc, word;
if (req->ctype != NIX_AQ_CTYPE_CQ)
return 0;
rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
req->hdr.pcifunc, ctype, req->qidx);
if (rc) {
dev_err(rvu->dev,
"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
__func__, nix_get_ctx_name(ctype), req->qidx,
req->hdr.pcifunc);
return rc;
}
/* Make copy of original context & mask which are required
* for resubmission
*/
memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
/* exclude fields which HW can update */
aq_req.cq_mask.cq_err = 0;
aq_req.cq_mask.wrptr = 0;
aq_req.cq_mask.tail = 0;
aq_req.cq_mask.head = 0;
aq_req.cq_mask.avg_level = 0;
aq_req.cq_mask.update_time = 0;
aq_req.cq_mask.substream = 0;
/* Context mask (cq_mask) holds mask value of fields which
* are changed in AQ WRITE operation.
* for example cq.drop = 0xa;
* cq_mask.drop = 0xff;
* Below logic performs '&' between cq and cq_mask so that non
* updated fields are masked out for request and response
* comparison
*/
for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
word++) {
*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
}
if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
return 0;
}
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
struct nix_hw *nix_hw;
int err, retries = 5;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
retry:
err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
	/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
	 * As a workaround, perform a CQ context read after each AQ write. If
	 * the read shows the AQ write did not take effect, perform the AQ
	 * write again.
	 */
if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
if (retries--)
goto retry;
else
return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
}
}
return err;
}
static const char *nix_get_ctx_name(int ctype)
{
switch (ctype) {
case NIX_AQ_CTYPE_CQ:
return "CQ";
case NIX_AQ_CTYPE_SQ:
return "SQ";
case NIX_AQ_CTYPE_RQ:
return "RQ";
case NIX_AQ_CTYPE_RSS:
return "RSS";
}
return "";
}
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
struct nix_aq_enq_req aq_req;
unsigned long *bmap;
int qidx, q_cnt = 0;
int err = 0, rc;
if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
return NIX_AF_ERR_AQ_ENQUEUE;
memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
aq_req.hdr.pcifunc = req->hdr.pcifunc;
if (req->ctype == NIX_AQ_CTYPE_CQ) {
aq_req.cq.ena = 0;
aq_req.cq_mask.ena = 1;
aq_req.cq.bp_ena = 0;
aq_req.cq_mask.bp_ena = 1;
q_cnt = pfvf->cq_ctx->qsize;
bmap = pfvf->cq_bmap;
}
if (req->ctype == NIX_AQ_CTYPE_SQ) {
aq_req.sq.ena = 0;
aq_req.sq_mask.ena = 1;
q_cnt = pfvf->sq_ctx->qsize;
bmap = pfvf->sq_bmap;
}
if (req->ctype == NIX_AQ_CTYPE_RQ) {
aq_req.rq.ena = 0;
aq_req.rq_mask.ena = 1;
q_cnt = pfvf->rq_ctx->qsize;
bmap = pfvf->rq_bmap;
}
aq_req.ctype = req->ctype;
aq_req.op = NIX_AQ_INSTOP_WRITE;
for (qidx = 0; qidx < q_cnt; qidx++) {
if (!test_bit(qidx, bmap))
continue;
aq_req.qidx = qidx;
rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
if (rc) {
err = rc;
dev_err(rvu->dev, "Failed to disable %s:%d context\n",
nix_get_ctx_name(req->ctype), qidx);
}
}
return err;
}
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
struct nix_aq_enq_req lock_ctx_req;
int err;
if (req->op != NIX_AQ_INSTOP_INIT)
return 0;
if (req->ctype == NIX_AQ_CTYPE_MCE ||
req->ctype == NIX_AQ_CTYPE_DYNO)
return 0;
memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
lock_ctx_req.ctype = req->ctype;
lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
lock_ctx_req.qidx = req->qidx;
err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
if (err)
dev_err(rvu->dev,
"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
req->hdr.pcifunc,
nix_get_ctx_name(req->ctype), req->qidx);
return err;
}
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
int err;
err = rvu_nix_aq_enq_inst(rvu, req, rsp);
if (!err)
err = nix_lf_hwctx_lockdown(rvu, req);
return err;
}
#else
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif
/* CN10K mbox handler */
int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
struct nix_cn10k_aq_enq_req *req,
struct nix_cn10k_aq_enq_rsp *rsp)
{
return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
(struct nix_aq_enq_rsp *)rsp);
}
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp)
{
return nix_lf_hwctx_disable(rvu, req);
}
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
struct nix_lf_alloc_req *req,
struct nix_lf_alloc_rsp *rsp)
{
int nixlf, qints, hwctx_size, intf, err, rc = 0;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
struct rvu_pfvf *pfvf;
u64 cfg, ctx_cfg;
int blkaddr;
if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
return NIX_AF_ERR_PARAM;
if (req->way_mask)
req->way_mask &= 0xFFFF;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
block = &hw->block[blkaddr];
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
if (req->npa_func) {
/* If default, use 'this' NIXLF's PFFUNC */
if (req->npa_func == RVU_DEFAULT_PF_FUNC)
req->npa_func = pcifunc;
if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
return NIX_AF_INVAL_NPA_PF_FUNC;
}
/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
if (req->sso_func) {
/* If default, use 'this' NIXLF's PFFUNC */
if (req->sso_func == RVU_DEFAULT_PF_FUNC)
req->sso_func = pcifunc;
if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
return NIX_AF_INVAL_SSO_PF_FUNC;
}
	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be a power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or the
	 * entire table can't be used.
	 */
if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
!is_power_of_2(req->rss_sz)))
return NIX_AF_ERR_RSS_SIZE_INVALID;
if (req->rss_sz &&
(!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
return NIX_AF_ERR_RSS_GRPS_INVALID;
/* Reset this NIX LF */
err = rvu_lf_reset(rvu, block, nixlf);
if (err) {
dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
block->addr - BLKADDR_NIX0, nixlf);
return NIX_AF_ERR_LF_RESET;
}
ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
/* Alloc NIX RQ HW context memory and config the base */
hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
if (err)
goto free_mem;
pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
if (!pfvf->rq_bmap)
goto free_mem;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
(u64)pfvf->rq_ctx->iova);
/* Set caching and queue count in HW */
cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
/* Alloc NIX SQ HW context memory and config the base */
hwctx_size = 1UL << (ctx_cfg & 0xF);
err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
if (err)
goto free_mem;
pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
if (!pfvf->sq_bmap)
goto free_mem;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
(u64)pfvf->sq_ctx->iova);
cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
/* Alloc NIX CQ HW context memory and config the base */
hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
if (err)
goto free_mem;
pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
if (!pfvf->cq_bmap)
goto free_mem;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
(u64)pfvf->cq_ctx->iova);
cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
/* Initialize receive side scaling (RSS) */
hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
req->rss_grps, hwctx_size, req->way_mask,
!!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
if (err)
goto free_mem;
/* Alloc memory for CQINT's HW contexts */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
qints = (cfg >> 24) & 0xFFF;
hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
if (err)
goto free_mem;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
(u64)pfvf->cq_ints_ctx->iova);
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
BIT_ULL(36) | req->way_mask << 20);
/* Alloc memory for QINT's HW contexts */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
qints = (cfg >> 12) & 0xFFF;
hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
if (err)
goto free_mem;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
(u64)pfvf->nix_qints_ctx->iova);
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
BIT_ULL(36) | req->way_mask << 20);
/* Setup VLANX TPID's.
* Use VLAN1 for 802.1Q
* and VLAN0 for 802.1AD.
*/
cfg = (0x8100ULL << 16) | 0x88A8ULL;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
/* Enable LMTST for this NIX LF */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
if (req->npa_func)
cfg = req->npa_func;
if (req->sso_func)
cfg |= (u64)req->sso_func << 16;
cfg |= (u64)req->xqe_sz << 33;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
/* Config Rx pkt length, csum checks and apad enable / disable */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
/* Configure pkind for TX parse config */
cfg = NPC_TX_DEF_PKIND;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
intf = NIX_INTF_TYPE_SDP;
err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
!!(req->flags & NIX_LF_LBK_BLK_SEL));
if (err)
goto free_mem;
/* Disable NPC entries as NIXLF's contexts are not initialized yet */
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
/* Configure RX VTAG Type 7 (strip) for vf vlan */
rvu_write64(rvu, blkaddr,
NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
VTAGSIZE_T4 | VTAG_STRIP);
goto exit;
free_mem:
nix_ctx_free(rvu, pfvf);
rc = -ENOMEM;
exit:
/* Set macaddr of this PF/VF */
ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
/* set SQB size info */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
rsp->sqb_size = (cfg >> 34) & 0xFFFF;
rsp->rx_chan_base = pfvf->rx_chan_base;
rsp->tx_chan_base = pfvf->tx_chan_base;
rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
/* Get HW supported stat count */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
/* Get count of CQ IRQs and error IRQs supported per LF */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
rsp->qints = ((cfg >> 12) & 0xFFF);
rsp->cints = ((cfg >> 24) & 0xFFF);
rsp->cgx_links = hw->cgx_links;
rsp->lbk_links = hw->lbk_links;
rsp->sdp_links = hw->sdp_links;
return rc;
}
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
block = &hw->block[blkaddr];
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
if (req->flags & NIX_LF_DISABLE_FLOWS)
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
else
rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
/* Free any tx vtag def entries used by this NIX LF */
if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
nix_free_tx_vtag_entries(rvu, pcifunc);
nix_interface_deinit(rvu, pcifunc, nixlf);
/* Reset this NIX LF */
err = rvu_lf_reset(rvu, block, nixlf);
if (err) {
dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
block->addr - BLKADDR_NIX0, nixlf);
return NIX_AF_ERR_LF_RESET;
}
nix_ctx_free(rvu, pfvf);
return 0;
}
int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
struct nix_mark_format_cfg *req,
struct nix_mark_format_cfg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
int blkaddr, rc;
u32 cfg;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
cfg = (((u32)req->offset & 0x7) << 16) |
(((u32)req->y_mask & 0xF) << 12) |
(((u32)req->y_val & 0xF) << 8) |
(((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
if (rc < 0) {
dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
return NIX_AF_ERR_MARK_CFG_FAIL;
}
rsp->mark_format_idx = rc;
return 0;
}
/* Handle shaper update specially for a few silicon revisions */
static bool
handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
int lvl, u64 reg, u64 regval)
{
u64 regbase, oldval, sw_xoff = 0;
u64 dbgval, md_debug0 = 0;
unsigned long poll_tmo;
	bool rate_reg = false;
u32 schq;
regbase = reg & 0xFFFF;
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
/* Check for rate register */
switch (lvl) {
case NIX_TXSCH_LVL_TL1:
md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
break;
case NIX_TXSCH_LVL_TL2:
md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
regbase == NIX_AF_TL2X_PIR(0));
break;
case NIX_TXSCH_LVL_TL3:
md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
regbase == NIX_AF_TL3X_PIR(0));
break;
case NIX_TXSCH_LVL_TL4:
md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
regbase == NIX_AF_TL4X_PIR(0));
break;
case NIX_TXSCH_LVL_MDQ:
sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
regbase == NIX_AF_MDQX_PIR(0));
break;
}
if (!rate_reg)
return false;
/* Nothing special to do when state is not toggled */
oldval = rvu_read64(rvu, blkaddr, reg);
if ((oldval & 0x1) == (regval & 0x1)) {
rvu_write64(rvu, blkaddr, reg, regval);
return true;
}
/* PIR/CIR disable */
if (!(regval & 0x1)) {
rvu_write64(rvu, blkaddr, sw_xoff, 1);
rvu_write64(rvu, blkaddr, reg, 0);
udelay(4);
rvu_write64(rvu, blkaddr, sw_xoff, 0);
return true;
}
/* PIR/CIR enable */
rvu_write64(rvu, blkaddr, sw_xoff, 1);
if (md_debug0) {
poll_tmo = jiffies + usecs_to_jiffies(10000);
/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
do {
if (time_after(jiffies, poll_tmo)) {
dev_err(rvu->dev,
"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
nixlf, schq, lvl);
goto exit;
}
usleep_range(1, 5);
dbgval = rvu_read64(rvu, blkaddr, md_debug0);
} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
}
rvu_write64(rvu, blkaddr, reg, regval);
exit:
rvu_write64(rvu, blkaddr, sw_xoff, 0);
return true;
}
/* Disable shaping of pkts by a scheduler queue
* at a given scheduler level.
*/
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
int nixlf, int lvl, int schq)
{
struct rvu_hwinfo *hw = rvu->hw;
u64 cir_reg = 0, pir_reg = 0;
u64 cfg;
switch (lvl) {
case NIX_TXSCH_LVL_TL1:
cir_reg = NIX_AF_TL1X_CIR(schq);
pir_reg = 0; /* PIR not available at TL1 */
break;
case NIX_TXSCH_LVL_TL2:
cir_reg = NIX_AF_TL2X_CIR(schq);
pir_reg = NIX_AF_TL2X_PIR(schq);
break;
case NIX_TXSCH_LVL_TL3:
cir_reg = NIX_AF_TL3X_CIR(schq);
pir_reg = NIX_AF_TL3X_PIR(schq);
break;
case NIX_TXSCH_LVL_TL4:
cir_reg = NIX_AF_TL4X_CIR(schq);
pir_reg = NIX_AF_TL4X_PIR(schq);
break;
case NIX_TXSCH_LVL_MDQ:
cir_reg = NIX_AF_MDQX_CIR(schq);
pir_reg = NIX_AF_MDQX_PIR(schq);
break;
}
/* Shaper state toggle needs wait/poll */
if (hw->cap.nix_shaper_toggle_wait) {
if (cir_reg)
handle_txschq_shaper_update(rvu, blkaddr, nixlf,
lvl, cir_reg, 0);
if (pir_reg)
handle_txschq_shaper_update(rvu, blkaddr, nixlf,
lvl, pir_reg, 0);
return;
}
if (!cir_reg)
return;
cfg = rvu_read64(rvu, blkaddr, cir_reg);
rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
if (!pir_reg)
return;
cfg = rvu_read64(rvu, blkaddr, pir_reg);
rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
}
static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
int lvl, int schq)
{
struct rvu_hwinfo *hw = rvu->hw;
int link_level;
int link;
if (lvl >= hw->cap.nix_tx_aggr_lvl)
return;
/* Reset TL4's SDP link config */
if (lvl == NIX_TXSCH_LVL_TL4)
rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
if (lvl != link_level)
return;
/* Reset TL2's CGX or LBK link config */
for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
rvu_write64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
int lvl, int schq)
{
struct rvu_hwinfo *hw = rvu->hw;
u64 reg;
/* Skip this if shaping is not supported */
if (!hw->cap.nix_shaping)
return;
/* Clear level specific SW_XOFF */
switch (lvl) {
case NIX_TXSCH_LVL_TL1:
reg = NIX_AF_TL1X_SW_XOFF(schq);
break;
case NIX_TXSCH_LVL_TL2:
reg = NIX_AF_TL2X_SW_XOFF(schq);
break;
case NIX_TXSCH_LVL_TL3:
reg = NIX_AF_TL3X_SW_XOFF(schq);
break;
case NIX_TXSCH_LVL_TL4:
reg = NIX_AF_TL4X_SW_XOFF(schq);
break;
case NIX_TXSCH_LVL_MDQ:
reg = NIX_AF_MDQX_SW_XOFF(schq);
break;
default:
return;
}
rvu_write64(rvu, blkaddr, reg, 0x0);
}
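/* Return the NIX transmit link for this PF_FUNC: the LBK link for AF's VFs,
 * the CGX LMAC link for CGX mapped PFs, else the SDP link.
 */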
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
struct rvu_hwinfo *hw = rvu->hw;
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
if (is_afvf(pcifunc)) {/* LBK links */
return hw->cgx_links;
} else if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return (cgx_id * hw->lmac_per_cgx) + lmac_id;
}
/* SDP link */
return hw->cgx_links + hw->lbk_links;
}
static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
int link, int *start, int *end)
{
struct rvu_hwinfo *hw = rvu->hw;
int pf = rvu_get_pf(pcifunc);
if (is_afvf(pcifunc)) { /* LBK links */
*start = hw->cap.nix_txsch_per_cgx_lmac * link;
*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
*start = hw->cap.nix_txsch_per_cgx_lmac * link;
*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
} else { /* SDP link */
*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
}
}
static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
struct nix_hw *nix_hw,
struct nix_txsch_alloc_req *req)
{
struct rvu_hwinfo *hw = rvu->hw;
int schq, req_schq, free_cnt;
struct nix_txsch *txsch;
int link, start, end;
txsch = &nix_hw->txsch[lvl];
req_schq = req->schq_contig[lvl] + req->schq[lvl];
if (!req_schq)
return 0;
link = nix_get_tx_link(rvu, pcifunc);
/* For traffic aggregating scheduler level, one queue is enough */
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
if (req_schq != 1)
return NIX_AF_ERR_TLX_ALLOC_FAIL;
return 0;
}
	/* Get free SCHQ count and check if request can be accommodated */
if (hw->cap.nix_fixed_txschq_mapping) {
nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
if (end <= txsch->schq.max && schq < end &&
!test_bit(schq, txsch->schq.bmap))
free_cnt = 1;
else
free_cnt = 0;
} else {
free_cnt = rvu_rsrc_free_count(&txsch->schq);
}
if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
return NIX_AF_ERR_TLX_ALLOC_FAIL;
/* If contiguous queues are needed, check for availability */
if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
!rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
return NIX_AF_ERR_TLX_ALLOC_FAIL;
return 0;
}
static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
struct nix_txsch_alloc_rsp *rsp,
int lvl, int start, int end)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = rsp->hdr.pcifunc;
int idx, schq;
	/* For traffic aggregating levels, queue alloc is based
	 * on the transmit link to which the PF_FUNC is mapped.
	 */
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
/* A single TL queue is allocated */
if (rsp->schq_contig[lvl]) {
rsp->schq_contig[lvl] = 1;
rsp->schq_contig_list[lvl][0] = start;
}
		/* Both contig and non-contig reqs don't make sense here */
if (rsp->schq_contig[lvl])
rsp->schq[lvl] = 0;
if (rsp->schq[lvl]) {
rsp->schq[lvl] = 1;
rsp->schq_list[lvl][0] = start;
}
return;
}
/* Adjust the queue request count if HW supports
* only one queue per level configuration.
*/
if (hw->cap.nix_fixed_txschq_mapping) {
idx = pcifunc & RVU_PFVF_FUNC_MASK;
schq = start + idx;
if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
rsp->schq_contig[lvl] = 0;
rsp->schq[lvl] = 0;
return;
}
if (rsp->schq_contig[lvl]) {
rsp->schq_contig[lvl] = 1;
set_bit(schq, txsch->schq.bmap);
rsp->schq_contig_list[lvl][0] = schq;
rsp->schq[lvl] = 0;
} else if (rsp->schq[lvl]) {
rsp->schq[lvl] = 1;
set_bit(schq, txsch->schq.bmap);
rsp->schq_list[lvl][0] = schq;
}
return;
}
	/* Allocate the requested contiguous queue indices first */
if (rsp->schq_contig[lvl]) {
schq = bitmap_find_next_zero_area(txsch->schq.bmap,
txsch->schq.max, start,
rsp->schq_contig[lvl], 0);
if (schq >= end)
rsp->schq_contig[lvl] = 0;
for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
set_bit(schq, txsch->schq.bmap);
rsp->schq_contig_list[lvl][idx] = schq;
schq++;
}
}
/* Allocate non-contiguous queue indices */
if (rsp->schq[lvl]) {
idx = 0;
for (schq = start; schq < end; schq++) {
if (!test_bit(schq, txsch->schq.bmap)) {
set_bit(schq, txsch->schq.bmap);
rsp->schq_list[lvl][idx++] = schq;
}
if (idx == rsp->schq[lvl])
break;
}
/* Update how many were allocated */
rsp->schq[lvl] = idx;
}
}
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
struct nix_txsch_alloc_req *req,
struct nix_txsch_alloc_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int link, blkaddr, rc = 0;
int lvl, idx, start, end;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u32 *pfvf_map;
int nixlf;
u16 schq;
rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
if (rc)
return rc;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
mutex_lock(&rvu->rsrc_lock);
	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
	 */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
if (rc)
goto err;
}
/* Allocate requested Tx scheduler queues */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
pfvf_map = txsch->pfvf_map;
if (!req->schq[lvl] && !req->schq_contig[lvl])
continue;
rsp->schq[lvl] = req->schq[lvl];
rsp->schq_contig[lvl] = req->schq_contig[lvl];
link = nix_get_tx_link(rvu, pcifunc);
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
start = link;
end = link;
} else if (hw->cap.nix_fixed_txschq_mapping) {
nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
} else {
start = 0;
end = txsch->schq.max;
}
nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
/* Reset queue config */
for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
schq = rsp->schq_contig_list[lvl][idx];
if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
}
for (idx = 0; idx < req->schq[lvl]; idx++) {
schq = rsp->schq_list[lvl][idx];
if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
}
}
rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
goto exit;
err:
rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
mutex_unlock(&rvu->rsrc_lock);
return rc;
}
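/* Flush an SMQ: temporarily enable CGX transmit and disable link
 * backpressure so in-flight packets can drain, set enqueue XOFF along with
 * the flush bit in NIX_AF_SMQ(x)_CFG and poll for the flush to complete,
 * then restore the CGX transmit state.
 */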
static int nix_smq_flush(struct rvu *rvu, int blkaddr,
int smq, u16 pcifunc, int nixlf)
{
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
int err, restore_tx_en = 0;
u64 cfg;
/* enable cgx tx if disabled */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, true);
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
/* Do SMQ flush and set enqueue xoff */
cfg |= BIT_ULL(50) | BIT_ULL(49);
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
/* Disable backpressure from physical link,
* otherwise SMQ flush may stall.
*/
rvu_cgx_enadis_rx_bp(rvu, pf, false);
/* Wait for flush to complete */
err = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
if (err)
dev_err(rvu->dev,
"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
rvu_cgx_enadis_rx_bp(rvu, pf, true);
/* restore cgx tx state */
if (restore_tx_en)
cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
return err;
}
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
int blkaddr, nixlf, lvl, schq, err;
struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u16 map_func;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
	/* Disable TL2/3 queue links and all XOFF's before SMQ flush */
mutex_lock(&rvu->rsrc_lock);
for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
if (lvl >= hw->cap.nix_tx_aggr_lvl)
continue;
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
}
}
nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
nix_get_tx_link(rvu, pcifunc));
/* On PF cleanup, clear cfg done flag as
* PF would have changed default config.
*/
if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
schq = nix_get_tx_link(rvu, pcifunc);
/* Do not clear pcifunc in txsch->pfvf_map[schq] because
* VF might be using this TL1 queue
*/
map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
}
/* Flush SMQs */
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
}
/* Now free scheduler queues to free pool */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		/* TLs above the aggregation level are shared across a PF
		 * and its VFs, hence skip freeing them.
		 */
if (lvl >= hw->cap.nix_tx_aggr_lvl)
continue;
txsch = &nix_hw->txsch[lvl];
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
rvu_free_rsrc(&txsch->schq, schq);
txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
}
}
mutex_unlock(&rvu->rsrc_lock);
/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
if (err)
dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
return 0;
}
static int nix_txschq_free_one(struct rvu *rvu,
struct nix_txsch_free_req *req)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int lvl, schq, nixlf, blkaddr;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u32 *pfvf_map;
int rc;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
lvl = req->schq_lvl;
schq = req->schq;
txsch = &nix_hw->txsch[lvl];
if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
return 0;
pfvf_map = txsch->pfvf_map;
mutex_lock(&rvu->rsrc_lock);
if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
rc = NIX_AF_ERR_TLX_INVALID;
goto err;
}
/* Clear SW_XOFF of this resource only.
* For the SMQ level, all XOFF's along the path
* need to be cleared by the user
*/
nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
/* Flush if it is an SMQ. The onus of disabling
* TL2/3 queue links before the SMQ flush is on the user
*/
if (lvl == NIX_TXSCH_LVL_SMQ &&
nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
rc = NIX_AF_SMQ_FLUSH_FAILED;
goto err;
}
/* Free the resource */
rvu_free_rsrc(&txsch->schq, schq);
txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
mutex_unlock(&rvu->rsrc_lock);
return 0;
err:
mutex_unlock(&rvu->rsrc_lock);
return rc;
}
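/* Mbox handler: free either all scheduler queues owned by the requester
* or only the one specified in the request, based on TXSCHQ_FREE_ALL.
*/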
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
struct nix_txsch_free_req *req,
struct msg_rsp *rsp)
{
if (req->flags & TXSCHQ_FREE_ALL)
return nix_txschq_free(rvu, req->hdr.pcifunc);
else
return nix_txschq_free_one(rvu, req);
}
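/* Validate that the register being written maps to a scheduler queue
* owned by this PF/VF and, for *_PARENT registers, that the parent
* queue at the next level is owned by it as well.
*/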
static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
int lvl, u64 reg, u64 regval)
{
u64 regbase = reg & 0xFFFF;
u16 schq, parent;
if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
return false;
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
/* Check if this schq belongs to this PF/VF or not */
if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
return false;
parent = (regval >> 16) & 0x1FF;
/* Validate MDQ's TL4 parent */
if (regbase == NIX_AF_MDQX_PARENT(0) &&
!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
return false;
/* Validate TL4's TL3 parent */
if (regbase == NIX_AF_TL4X_PARENT(0) &&
!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
return false;
/* Validate TL3's TL2 parent */
if (regbase == NIX_AF_TL3X_PARENT(0) &&
!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
return false;
/* Validate TL2's TL1 parent */
if (regbase == NIX_AF_TL2X_PARENT(0) &&
!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
return false;
return true;
}
static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
{
u64 regbase;
if (hw->cap.nix_shaping)
return true;
/* If shaping and coloring are not supported, then
* *_CIR and *_PIR registers should not be configured.
*/
regbase = reg & 0xFFFF;
switch (lvl) {
case NIX_TXSCH_LVL_TL1:
if (regbase == NIX_AF_TL1X_CIR(0))
return false;
break;
case NIX_TXSCH_LVL_TL2:
if (regbase == NIX_AF_TL2X_CIR(0) ||
regbase == NIX_AF_TL2X_PIR(0))
return false;
break;
case NIX_TXSCH_LVL_TL3:
if (regbase == NIX_AF_TL3X_CIR(0) ||
regbase == NIX_AF_TL3X_PIR(0))
return false;
break;
case NIX_TXSCH_LVL_TL4:
if (regbase == NIX_AF_TL4X_CIR(0) ||
regbase == NIX_AF_TL4X_PIR(0))
return false;
break;
case NIX_TXSCH_LVL_MDQ:
if (regbase == NIX_AF_MDQX_CIR(0) ||
regbase == NIX_AF_MDQX_PIR(0))
return false;
break;
}
return true;
}
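/* Apply default TL1 topology and DWRR schedule config for the TX link
* used by this PF and clear its CIR, unless the PF has already done
* the config (NIX_TXSCHQ_CFG_DONE).
*/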
static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
u16 pcifunc, int blkaddr)
{
u32 *pfvf_map;
int schq;
schq = nix_get_tx_link(rvu, pcifunc);
pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
/* Skip if PF has already done the config */
if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
return;
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
(TXSCH_TL1_DFLT_RR_PRIO << 1));
/* On OcteonTx2 the config was in bytes; on newer silicons
* it's changed to weight.
*/
if (!rvu->hw->cap.nix_common_dwrr_mtu)
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
TXSCH_TL1_DFLT_RR_QTM);
else
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
CN10K_MAX_DWRR_WEIGHT);
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
/* Register offset - [15:0]
* Scheduler Queue number - [25:16]
*/
#define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0)
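/* Read back the requested scheduler queue registers after validating
* that each register offset and queue index belongs to the requester.
*/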
static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
int blkaddr, struct nix_txschq_config *req,
struct nix_txschq_config *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int idx, schq;
u64 reg;
for (idx = 0; idx < req->num_regs; idx++) {
reg = req->reg[idx];
reg &= NIX_TX_SCHQ_MASK;
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
!is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
return NIX_AF_INVAL_TXSCHQ_CFG;
rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
}
rsp->lvl = req->lvl;
rsp->num_regs = req->num_regs;
return 0;
}
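/* For a CGX mapped PF, enable all LBK links on every TL2 queue owned
* by 'pcifunc' so that packets can also be steered to LBK.
*/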
static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
u16 pcifunc, struct nix_txsch *txsch)
{
struct rvu_hwinfo *hw = rvu->hw;
int lbk_link_start, lbk_links;
u8 pf = rvu_get_pf(pcifunc);
int schq;
if (!is_pf_cgxmapped(rvu, pf))
return;
lbk_link_start = hw->cgx_links;
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
/* Enable all LBK links with channel 63 by default so that
* packets can be sent to LBK with an NPC TX MCAM rule
*/
lbk_links = hw->lbk_links;
while (lbk_links--)
rvu_write64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq,
lbk_link_start +
lbk_links),
BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
}
}
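/* Mbox handler: validate and apply scheduler queue register writes from
* a PF/VF, or service a read request. Special cases handled here are
* SMQ flush, NIXLF slot fixup, BP_ENA clearing and TL1 defaults for VFs.
*/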
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct nix_txschq_config *rsp)
{
u64 reg, val, regval, schq_regbase, val_mask;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
int blkaddr, idx, err;
int nixlf, schq;
u32 *pfvf_map;
if (req->lvl >= NIX_TXSCH_LVL_CNT ||
req->num_regs > MAX_REGS_PER_MBOX_MSG)
return NIX_AF_INVAL_TXSCHQ_CFG;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
if (err)
return err;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
if (req->read)
return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
txsch = &nix_hw->txsch[req->lvl];
pfvf_map = txsch->pfvf_map;
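/* TLs at and above the aggregation level are shared, so a VF's config
* request for them is not applied; only ensure TL1 defaults are set
* for the link it transmits on.
*/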
if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
pcifunc & RVU_PFVF_FUNC_MASK) {
mutex_lock(&rvu->rsrc_lock);
if (req->lvl == NIX_TXSCH_LVL_TL1)
nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
mutex_unlock(&rvu->rsrc_lock);
return 0;
}
for (idx = 0; idx < req->num_regs; idx++) {
reg = req->reg[idx];
reg &= NIX_TX_SCHQ_MASK;
regval = req->regval[idx];
schq_regbase = reg & 0xFFFF;
val_mask = req->regval_mask[idx];
if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
txsch->lvl, reg, regval))
return NIX_AF_INVAL_TXSCHQ_CFG;
/* Check if shaping and coloring are supported */
if (!is_txschq_shaping_valid(hw, req->lvl, reg))
continue;
val = rvu_read64(rvu, blkaddr, reg);
regval = (val & val_mask) | (regval & ~val_mask);
/* Handle shaping state toggle specially */
if (hw->cap.nix_shaper_toggle_wait &&
handle_txschq_shaper_update(rvu, blkaddr, nixlf,
req->lvl, reg, regval))
continue;
/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
pcifunc, 0);
regval &= ~(0x7FULL << 24);
regval |= ((u64)nixlf << 24);
}
/* Clear 'BP_ENA' config, if it's not allowed */
if (!hw->cap.nix_tx_link_bp) {
if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
(schq_regbase & 0xFF00) ==
NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
regval &= ~BIT_ULL(13);
}
/* Mark config as done for TL1 by PF */
if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
mutex_lock(&rvu->rsrc_lock);
pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
NIX_TXSCHQ_CFG_DONE);
mutex_unlock(&rvu->rsrc_lock);
}
/* SMQ flush is special, hence split the register write such
* that the flush is done first and the rest of the bits are written later.
*/
if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
(regval & BIT_ULL(49))) {
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
regval &= ~BIT_ULL(49);
}
rvu_write64(rvu, blkaddr, reg, regval);
}
rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
&nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
return 0;
}
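/* Configure an RX VTAG type for this NIXLF: program VTAG size and the
* strip/capture controls. Type 7 is reserved for VF VLAN and cannot be
* configured here.
*/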
static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
struct nix_vtag_config *req)
{
u64 regval = req->vtag_size;
if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
req->vtag_size > VTAGSIZE_T8)
return -EINVAL;
/* RX VTAG type 7 is reserved for VF VLAN */
if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
return NIX_AF_ERR_RX_VTAG_INUSE;
if (req->rx.capture_vtag)
regval |= BIT_ULL(5);
if (req->rx.strip_vtag)
regval |= BIT_ULL(4);
rvu_write64(rvu, blkaddr,
NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
return 0;
}
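/* Free a TX VTAG entry owned by 'pcifunc': clear its DATA and CTL
* registers and return the entry to the free pool.
*/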
static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
u16 pcifunc, int index)
{
struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
struct nix_txvlan *vlan;
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
vlan = &nix_hw->txvlan;
if (vlan->entry2pfvf_map[index] != pcifunc)
return NIX_AF_ERR_PARAM;
rvu_write64(rvu, blkaddr,
NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
rvu_write64(rvu, blkaddr,
NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
vlan->entry2pfvf_map[index] = 0;
rvu_free_rsrc(&vlan->rsrc, index);
return 0;
}
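/* Free all TX VTAG entries allocated to 'pcifunc' */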
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
{
struct nix_txvlan *vlan;
struct nix_hw *nix_hw;
int index, blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return;
vlan = &nix_hw->txvlan;
mutex_lock(&vlan->rsrc_lock);
/* Scan all the entries and free the ones mapped to 'pcifunc' */
for (index = 0; index < vlan->rsrc