Merge branch 'octeontx2-traffic-shaping'
Sunil Goutham says:

====================
Octeontx2: Traffic shaping and SDP link config support

This patch series adds support for traffic shaping configuration on all
silicons available after 96xx C0. It also adds the SDP link related
configuration needed when Octeon is connected as an end-point and traffic
needs to flow from end-point to host and vice versa.

The series also carries other changes:
- New mbox messages in the admin function driver for PF/VF drivers to
  retrieve the available HW resource count. HW resources like block LFs,
  bandwidth profiles etc. are covered.
- A PTP device ID for the new CN10K and 95O silicons.
- etc.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9270c565b0
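
Of the new mailbox messages, FREE_RSRC_CNT is the one a generic PF/VF driver
is most likely to use. A minimal sketch of how a driver could query the
counters; the otx2_mbox_alloc_msg_free_rsrc_cnt() helper name follows the
pattern the M() macro table generates, and error paths (e.g. an ERR_PTR
response) are trimmed:

static int otx2_query_free_rsrc(struct otx2_nic *pfvf)
{
	struct free_rsrcs_rsp *rsp;
	struct msg_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	/* MBOX_MSG_FREE_RSRC_CNT (0x004) takes an empty msg_req */
	req = otx2_mbox_alloc_msg_free_rsrc_cnt(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (!err) {
		rsp = (struct free_rsrcs_rsp *)
		      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
		dev_info(pfvf->dev, "free: %u NPA LFs, %u NIX LFs, %u SMQs\n",
			 rsp->npa, rsp->nix, rsp->schq[NIX_TXSCH_LVL_SMQ]);
	}
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}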
drivers/net/ethernet/marvell/octeontx2/af/Makefile

@@ -10,4 +10,5 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o
rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
rvu_sdp.o
drivers/net/ethernet/marvell/octeontx2/af/common.h

@@ -185,6 +185,7 @@ enum nix_scheduler {
#define NIX_INTF_TYPE_CGX 0
#define NIX_INTF_TYPE_LBK 1
#define NIX_INTF_TYPE_SDP 2
#define MAX_LMAC_PKIND 12
#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
@@ -192,6 +193,8 @@ enum nix_scheduler {
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
#define NIX_CHAN_SDP_CH_START (0x700ull)
#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a))
#define NIX_CHAN_SDP_NUM_CHANS 256
/* The mask is to extract lower 10-bits of channel number
* which CPT will pass to X2P.

drivers/net/ethernet/marvell/octeontx2/af/mbox.h

@@ -87,7 +87,7 @@ struct mbox_msghdr {
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
u16 sig; /* Signature, for validating corrupted msgs */
#define OTX2_MBOX_VERSION (0x0007)
#define OTX2_MBOX_VERSION (0x0009)
u16 ver; /* Version of msg's structure for this ID */
u16 next_msgoff; /* Offset of next msg within mailbox region */
int rc; /* Msg process'ed response code */
@@ -130,6 +130,7 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
M(FREE_RSRC_CNT, 0x004, free_rsrc_cnt, msg_req, free_rsrcs_rsp) \
M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
@@ -191,6 +192,9 @@ M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
msg_rsp) \
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
npc_mcam_alloc_entry_rsp) \
@@ -243,7 +247,8 @@ M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, \
nix_txschq_config) \
M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \
nix_vtag_config_rsp) \
@@ -274,7 +279,9 @@ M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \
M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
nix_bandprof_alloc_rsp) \
M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp)
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
@@ -363,6 +370,25 @@ struct rsrc_detach {
u8 cptlfs:1;
};
/* Number of resources available to the caller.
* In reply to MBOX_MSG_FREE_RSRC_CNT.
*/
struct free_rsrcs_rsp {
struct mbox_msghdr hdr;
u16 schq[NIX_TXSCH_LVL_CNT];
u16 sso;
u16 tim;
u16 ssow;
u16 cpt;
u8 npa;
u8 nix;
u16 schq_nix1[NIX_TXSCH_LVL_CNT];
u8 nix1;
u8 cpt1;
u8 ree0;
u8 ree1;
};
#define MSIX_VECTOR_INVALID 0xFFFF
#define MAX_RVU_BLKLF_CNT 256
@@ -370,16 +396,20 @@ struct msix_offset_rsp {
struct mbox_msghdr hdr;
u16 npa_msixoff;
u16 nix_msixoff;
u8 sso;
u8 ssow;
u8 timlfs;
u8 cptlfs;
u16 sso;
u16 ssow;
u16 timlfs;
u16 cptlfs;
u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
u8 cpt1_lfs;
u16 cpt1_lfs;
u16 ree0_lfs;
u16 ree1_lfs;
u16 cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT];
u16 ree0_lf_msixoff[MAX_RVU_BLKLF_CNT];
u16 ree1_lf_msixoff[MAX_RVU_BLKLF_CNT];
};
struct get_hw_cap_rsp {
@@ -699,6 +729,9 @@ struct nix_lf_alloc_req {
u16 sso_func;
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
u64 way_mask;
#define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0)
#define NIX_LF_LBK_BLK_SEL BIT_ULL(1)
u64 flags;
};
struct nix_lf_alloc_rsp {
@@ -718,6 +751,7 @@ struct nix_lf_alloc_rsp {
u8 cgx_links; /* No. of CGX links present in HW */
u8 lbk_links; /* No. of LBK links present in HW */
u8 sdp_links; /* No. of SDP links present in HW */
u8 tx_link; /* Transmit channel link number */
};
struct nix_lf_free_req {
@@ -836,6 +870,7 @@ struct nix_txsch_free_req {
struct nix_txschq_config {
struct mbox_msghdr hdr;
u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
u8 read;
#define TXSCHQ_IDX_SHIFT 16
#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
@@ -843,6 +878,8 @@ struct nix_txschq_config {
#define MAX_REGS_PER_MBOX_MSG 20
u64 reg[MAX_REGS_PER_MBOX_MSG];
u64 regval[MAX_REGS_PER_MBOX_MSG];
/* All 0's => overwrite with new value */
u64 regval_mask[MAX_REGS_PER_MBOX_MSG];
};
struct nix_vtag_config {
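
The new read and regval_mask fields above turn NIX_TXSCHQ_CFG into a
read-modify-write interface: on writes the AF computes
regval = (old & regval_mask) | (regval & ~regval_mask), so an all-zero mask
overwrites the register, and with read set the same message returns register
contents in the response instead. A hedged sketch of a read request from a
PF/VF driver (the helper name follows the M() macro pattern; illustrative
only):

	struct nix_txschq_config *req;

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req)
		return -ENOMEM;
	req->read = 1;				/* fetch instead of write */
	req->lvl = NIX_TXSCH_LVL_TL2;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL2X_CIR(schq);	/* response carries regval[0] */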
@@ -1066,6 +1103,12 @@ struct nix_bandprof_free_req {
u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
};
struct nix_bandprof_get_hwinfo_rsp {
struct mbox_msghdr hdr;
u16 prof_count[BAND_PROF_NUM_LAYERS];
u32 policer_timeunit;
};
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
@@ -1434,6 +1477,27 @@ struct cpt_rxc_time_cfg_req {
u16 active_limit;
};
struct sdp_node_info {
/* Node to which this PF belongs */
u8 node_id;
u8 max_vfs;
u8 num_pf_rings;
u8 pf_srn;
#define SDP_MAX_VFS 128
u8 vf_rings[SDP_MAX_VFS];
};
struct sdp_chan_info_msg {
struct mbox_msghdr hdr;
struct sdp_node_info info;
};
struct sdp_get_chan_info_msg {
struct mbox_msghdr hdr;
u16 chan_base;
u16 num_chan;
};
/* CGX mailbox error codes
* Range 1101 - 1200.
*/
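
The SDP structures above are filled in by the SDP end-point PF driver, which
reports its ring layout to the AF before NIX channels can be assigned. A
minimal sketch, assuming an otx2_mbox_alloc_msg_set_sdp_chan_info() helper
generated from the M() table and example ring counts:

	struct sdp_chan_info_msg *req;

	req = otx2_mbox_alloc_msg_set_sdp_chan_info(mbox);
	if (!req)
		return -ENOMEM;
	req->info.node_id = 0;		/* SDP0 */
	req->info.max_vfs = 8;
	req->info.num_pf_rings = 8;	/* PF rings start at pf_srn */
	req->info.pf_srn = 0;
	req->info.vf_rings[0] = 4;	/* per-VF ring counts, one per VF */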
drivers/net/ethernet/marvell/octeontx2/af/ptp.c

@@ -19,12 +19,11 @@
#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100
#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200
#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400
#define PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP 0xB400
#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900
#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00
#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00
#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600
#define PCI_DEVID_OCTEONTX2_RST 0xA085
#define PCI_DEVID_CN10K_PTP 0xA09E
#define PCI_PTP_BAR_NO 0
#define PCI_RST_BAR_NO 0
@@ -39,6 +38,9 @@
#define RST_MUL_BITS GENMASK_ULL(38, 33)
#define CLOCK_BASE_RATE 50000000ULL
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
static u64 get_clock_rate(void)
{
u64 cfg, ret = CLOCK_BASE_RATE * 16;
@@ -74,23 +76,14 @@ error:
struct ptp *ptp_get(void)
{
struct pci_dev *pdev;
struct ptp *ptp;
struct ptp *ptp = first_ptp_block;
/* If the PTP pci device is found on the system and ptp
* driver is bound to it then the PTP pci device is returned
* to the caller (rvu driver).
*/
pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_PTP, NULL);
if (!pdev)
/* Check PTP block is present in hardware */
if (!pci_dev_present(ptp_id_table))
return ERR_PTR(-ENODEV);
ptp = pci_get_drvdata(pdev);
/* Check driver is bound to PTP block */
if (!ptp)
ptp = ERR_PTR(-EPROBE_DEFER);
if (IS_ERR(ptp))
pci_dev_put(pdev);
return ptp;
}
@@ -190,6 +183,8 @@ static int ptp_probe(struct pci_dev *pdev,
writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
pci_set_drvdata(pdev, ptp);
if (!first_ptp_block)
first_ptp_block = ptp;
return 0;
@@ -204,6 +199,9 @@ error:
* `dev->driver_data`.
*/
pci_set_drvdata(pdev, ERR_PTR(err));
if (!first_ptp_block)
first_ptp_block = ERR_PTR(err);
return 0;
}
@@ -233,19 +231,14 @@ static const struct pci_device_id ptp_id_table[] = {
PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP) },
PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_CN10K_A_PTP) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_CNF10K_A_PTP) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_CNF10K_B_PTP) },
PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_PTP) },
{ 0, }
};
drivers/net/ethernet/marvell/octeontx2/af/rvu.c

@@ -70,18 +70,21 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
hw->cap.nix_shaping = true;
hw->cap.nix_tx_link_bp = true;
hw->cap.nix_rx_multicast = true;
hw->cap.nix_shaper_toggle_wait = false;
hw->rvu = rvu;
if (is_rvu_96xx_B0(rvu)) {
if (is_rvu_pre_96xx_C0(rvu)) {
hw->cap.nix_fixed_txschq_mapping = true;
hw->cap.nix_txsch_per_cgx_lmac = 4;
hw->cap.nix_txsch_per_lbk_lmac = 132;
hw->cap.nix_txsch_per_sdp_lmac = 76;
hw->cap.nix_shaping = false;
hw->cap.nix_tx_link_bp = false;
if (is_rvu_96xx_A0(rvu))
if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
hw->cap.nix_rx_multicast = false;
}
if (!is_rvu_pre_96xx_C0(rvu))
hw->cap.nix_shaper_toggle_wait = true;
if (!is_rvu_otx2(rvu))
hw->cap.per_pf_mbox_regs = true;
@@ -1115,6 +1118,12 @@ cpt:
goto nix_err;
}
err = rvu_sdp_init(rvu);
if (err) {
dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
goto nix_err;
}
rvu_program_channels(rvu);
return 0;
@@ -1367,9 +1376,10 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
int blkaddr = BLKADDR_NIX0, vf;
struct rvu_pfvf *pf;
pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
/* All CGX mapped PFs are set with assigned NIX block during init */
if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
blkaddr = pf->nix_blkaddr;
} else if (is_afvf(pcifunc)) {
vf = pcifunc - 1;
@@ -1382,6 +1392,10 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
blkaddr = BLKADDR_NIX0;
}
/* if SDP1 then the blkaddr is NIX1 */
if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
blkaddr = BLKADDR_NIX1;
switch (blkaddr) {
case BLKADDR_NIX1:
pfvf->nix_blkaddr = BLKADDR_NIX1;
@@ -1782,6 +1796,99 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
return 0;
}
int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
struct free_rsrcs_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
mutex_lock(&rvu->rsrc_lock);
block = &hw->block[BLKADDR_NPA];
rsp->npa = rvu_rsrc_free_count(&block->lf);
block = &hw->block[BLKADDR_NIX0];
rsp->nix = rvu_rsrc_free_count(&block->lf);
block = &hw->block[BLKADDR_NIX1];
rsp->nix1 = rvu_rsrc_free_count(&block->lf);
block = &hw->block[BLKADDR_SSO];
rsp->sso = rvu_rsrc_free_count(&block->lf);
block = &hw->block[BLKADDR_SSOW];
rsp->ssow = rvu_rsrc_free_count(&block->lf);
block = &hw->block[BLKADDR_TIM];
rsp->tim = rvu_rsrc_free_count(&block->lf);
block = &hw->block[BLKADDR_CPT0];
rsp->cpt = rvu_rsrc_free_count(&block->lf);
block = &hw->block[BLKADDR_CPT1];
rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
if (rvu->hw->cap.nix_fixed_txschq_mapping) {
rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
/* NIX1 */
if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
goto out;
rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
} else {
nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
rsp->schq[NIX_TXSCH_LVL_SMQ] =
rvu_rsrc_free_count(&txsch->schq);
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
rsp->schq[NIX_TXSCH_LVL_TL4] =
rvu_rsrc_free_count(&txsch->schq);
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
rsp->schq[NIX_TXSCH_LVL_TL3] =
rvu_rsrc_free_count(&txsch->schq);
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
rsp->schq[NIX_TXSCH_LVL_TL2] =
rvu_rsrc_free_count(&txsch->schq);
if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
goto out;
nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
rvu_rsrc_free_count(&txsch->schq);
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
rvu_rsrc_free_count(&txsch->schq);
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
rvu_rsrc_free_count(&txsch->schq);
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
rvu_rsrc_free_count(&txsch->schq);
}
rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
out:
rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
mutex_unlock(&rvu->rsrc_lock);
return 0;
}
int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
drivers/net/ethernet/marvell/octeontx2/af/rvu.h

@@ -243,8 +243,10 @@ struct rvu_pfvf {
u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
u8 lbkid; /* NIX0/1 lbk link ID */
u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr*/
unsigned long flags;
struct sdp_node_info *sdp_info;
};
enum rvu_pfvf_flags {
@@ -328,6 +330,7 @@ struct hw_cap {
u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */
bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
bool nix_shaping; /* Is shaping and coloring supported */
bool nix_shaper_toggle_wait; /* Shaping toggle needs poll/wait */
bool nix_tx_link_bp; /* Can link backpressure TL queues ? */
bool nix_rx_multicast; /* Rx packet replication support */
bool nix_common_dwrr_mtu; /* Common DWRR MTU for quantum config */
@@ -517,20 +520,34 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
}
/* Silicon revisions */
static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
/* 96XX A0/B0, 95XX A0/A1/B0 chips */
return ((pdev->revision == 0x00) || (pdev->revision == 0x01) ||
(pdev->revision == 0x10) || (pdev->revision == 0x11) ||
(pdev->revision == 0x14));
}
static inline bool is_rvu_96xx_A0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
return (pdev->revision == 0x00) &&
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
return (pdev->revision == 0x00);
}
static inline bool is_rvu_96xx_B0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) &&
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
return (pdev->revision == 0x00) || (pdev->revision == 0x01);
}
static inline bool is_rvu_95xx_A0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
return (pdev->revision == 0x10) || (pdev->revision == 0x11);
}
/* REVID for PCIe devices.
@@ -581,6 +598,16 @@ static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid,
return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan;
}
static inline u16 rvu_nix_chan_sdp(struct rvu *rvu, u8 chan)
{
struct rvu_hwinfo *hw = rvu->hw;
if (!hw->cap.programmable_chans)
return NIX_CHAN_SDP_CHX(chan);
return hw->sdp_chan_base + chan;
}
static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
{
return rvu->hw->cpt_chan_base + chan;
@@ -643,10 +670,17 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int qsize, int inst_size, int res_size);
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
/* SDP APIs */
int rvu_sdp_init(struct rvu *rvu);
bool is_sdp_pfvf(u16 pcifunc);
bool is_sdp_pf(u16 pcifunc);
bool is_sdp_vf(u16 pcifunc);
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) &&
!is_sdp_pf(pf << RVU_PFVF_PF_SHIFT);
}
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
@@ -750,7 +784,6 @@ bool is_npc_intf_tx(u8 intf);
bool is_npc_intf_rx(u8 intf);
bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena);
int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel);
int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
const char *npc_get_field_name(u8 hdr);
int npc_get_bank(struct npc_mcam *mcam, int index);
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c

@@ -290,16 +290,22 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
return true;
}
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
struct nix_lf_alloc_rsp *rsp, bool loop)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
u16 req_chan_base, req_chan_end, req_chan_cnt;
struct rvu_hwinfo *hw = rvu->hw;
struct sdp_node_info *sdp_info;
int pkind, pf, vf, lbkid, vfid;
struct mac_ops *mac_ops;
int pkind, pf, vf, lbkid;
u8 cgx_id, lmac_id;
bool from_vf;
int err;
pf = rvu_get_pf(pcifunc);
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
type != NIX_INTF_TYPE_SDP)
return 0;
switch (type) {
@@ -317,10 +323,13 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
pfvf->tx_chan_base = pfvf->rx_chan_base;
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
/* By default we enable pause frames */
if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
@@ -340,6 +349,25 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
if (rvu->hw->lbk_links > 1)
lbkid = vf & 0x1 ? 0 : 1;
/* By default NIX0 is configured to send packet on lbk link 1
* (which corresponds to LBK1), same packet will receive on
* NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
* (which corresponds to LBK2) packet will receive on NIX0 lbk
* link 1.
* But if lbk links for NIX0 and NIX1 are negated, i.e NIX0
* transmits and receives on lbk link 0, which corresponds
* to LBK1 block, back to back connectivity between NIX and
* LBK can be achieved (which is similar to 96xx)
*
* RX TX
* NIX0 lbk link 1 (LBK2) 1 (LBK1)
* NIX0 lbk link 0 (LBK0) 0 (LBK0)
* NIX1 lbk link 0 (LBK1) 0 (LBK2)
* NIX1 lbk link 1 (LBK3) 1 (LBK3)
*/
if (loop)
lbkid = !lbkid;
/* Note that AF's VFs work in pairs and talk over consecutive
* loopback channels. Therefore if odd number of AF VFs are
* enabled then the last VF remains with no pair.
@@ -350,7 +378,48 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
rsp->tx_link = hw->cgx_links + lbkid;
pfvf->lbkid = lbkid;
rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
break;
case NIX_INTF_TYPE_SDP:
from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
sdp_info = parent_pf->sdp_info;
if (!sdp_info) {
dev_err(rvu->dev, "Invalid sdp_info pointer\n");
return -EINVAL;
}
if (from_vf) {
req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
sdp_info->num_pf_rings;
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
for (vfid = 0; vfid < vf; vfid++)
req_chan_base += sdp_info->vf_rings[vfid];
req_chan_cnt = sdp_info->vf_rings[vf];
req_chan_end = req_chan_base + req_chan_cnt - 1;
if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
dev_err(rvu->dev,
"PF_Func 0x%x: Invalid channel base and count\n",
pcifunc);
return -EINVAL;
}
} else {
req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
req_chan_cnt = sdp_info->num_pf_rings;
}
pfvf->rx_chan_base = req_chan_base;
pfvf->rx_chan_cnt = req_chan_cnt;
pfvf->tx_chan_base = pfvf->rx_chan_base;
pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
rsp->tx_link = hw->cgx_links + hw->lbk_links;
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
@@ -434,9 +503,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
int bpid, blkaddr, lmac_chan_cnt;
int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
struct rvu_hwinfo *hw = rvu->hw;
u16 cgx_bpid_cnt, lbk_bpid_cnt;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
u64 cfg;
@@ -445,8 +514,12 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
lmac_chan_cnt = cfg & 0xFF;
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
sdp_chan_cnt = cfg & 0xFFF;
cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -484,6 +557,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
return -EINVAL;
break;
case NIX_INTF_TYPE_SDP:
if ((req->chan_base + req->chan_cnt) > 255)
return -EINVAL;
bpid = sdp_bpid_cnt + req->chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
return -EINVAL;
break;
default:
return -EINVAL;
}
@@ -503,9 +587,12 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
pf = rvu_get_pf(pcifunc);
type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
type = NIX_INTF_TYPE_SDP;
/* Enable backpressure only for CGX mapped PFs and LBK interface */
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
type != NIX_INTF_TYPE_SDP)
return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -522,8 +609,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
cfg &= ~GENMASK_ULL(8, 0);
rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
cfg | (bpid & 0xFF) | BIT_ULL(16));
cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
chan_id++;
bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
}
@@ -671,9 +759,10 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
struct rvu_pfvf *pfvf, int nixlf,
int rss_sz, int rss_grps, int hwctx_size,
u64 way_mask)
u64 way_mask, bool tag_lsb_as_adder)
{
int err, grp, num_indices;
u64 val;
/* RSS is not requested for this NIXLF */
if (!rss_sz)
@@ -689,10 +778,13 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
(u64)pfvf->rss_ctx->iova);
/* Config full RSS table size, enable RSS and caching */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
BIT_ULL(36) | BIT_ULL(4) |
ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
way_mask << 20);
val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
if (tag_lsb_as_adder)
val |= BIT_ULL(5);
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
/* Config RSS group offset and sizes */
for (grp = 0; grp < rss_grps; grp++)
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
@@ -1241,7 +1333,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
/* Initialize receive side scaling (RSS) */
hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
req->rss_grps, hwctx_size, req->way_mask);
req->rss_grps, hwctx_size, req->way_mask,
!!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
if (err)
goto free_mem;
@@ -1299,7 +1392,11 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
err = nix_interface_init(rvu, pcifunc, intf, nixlf);
if (is_sdp_pfvf(pcifunc))
intf = NIX_INTF_TYPE_SDP;
err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
!!(req->flags & NIX_LF_LBK_BLK_SEL));
if (err)
goto free_mem;
@@ -1423,12 +1520,104 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
return 0;
}
/* Handle shaper update specially for few revisions */
static bool
handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
int lvl, u64 reg, u64 regval)
{
u64 regbase, oldval, sw_xoff = 0;
u64 dbgval, md_debug0 = 0;
unsigned long poll_tmo;
bool rate_reg = 0;
u32 schq;
regbase = reg & 0xFFFF;
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
/* Check for rate register */
switch (lvl) {
case NIX_TXSCH_LVL_TL1:
md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
break;
case NIX_TXSCH_LVL_TL2:
md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
regbase == NIX_AF_TL2X_PIR(0));
break;
case NIX_TXSCH_LVL_TL3:
md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
regbase == NIX_AF_TL3X_PIR(0));
break;
case NIX_TXSCH_LVL_TL4:
md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
regbase == NIX_AF_TL4X_PIR(0));
break;
case NIX_TXSCH_LVL_MDQ:
sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
regbase == NIX_AF_MDQX_PIR(0));
break;
}
if (!rate_reg)
return false;
/* Nothing special to do when state is not toggled */
oldval = rvu_read64(rvu, blkaddr, reg);
if ((oldval & 0x1) == (regval & 0x1)) {
rvu_write64(rvu, blkaddr, reg, regval);
return true;
}
/* PIR/CIR disable */
if (!(regval & 0x1)) {
rvu_write64(rvu, blkaddr, sw_xoff, 1);
rvu_write64(rvu, blkaddr, reg, 0);
udelay(4);
rvu_write64(rvu, blkaddr, sw_xoff, 0);
return true;
}
/* PIR/CIR enable */
rvu_write64(rvu, blkaddr, sw_xoff, 1);
if (md_debug0) {
poll_tmo = jiffies + usecs_to_jiffies(10000);
/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
do {
if (time_after(jiffies, poll_tmo)) {
dev_err(rvu->dev,
"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
nixlf, schq, lvl);
goto exit;
}
usleep_range(1, 5);
dbgval = rvu_read64(rvu, blkaddr, md_debug0);
} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
}
rvu_write64(rvu, blkaddr, reg, regval);
exit:
rvu_write64(rvu, blkaddr, sw_xoff, 0);
return true;
}
/* Disable shaping of pkts by a scheduler queue
* at a given scheduler level.
*/
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
int lvl, int schq)
int nixlf, int lvl, int schq)
{
struct rvu_hwinfo *hw = rvu->hw;
u64 cir_reg = 0, pir_reg = 0;
u64 cfg;
@@ -1449,6 +1638,21 @@ static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
cir_reg = NIX_AF_TL4X_CIR(schq);
pir_reg = NIX_AF_TL4X_PIR(schq);
break;
case NIX_TXSCH_LVL_MDQ:
cir_reg = NIX_AF_MDQX_CIR(schq);
pir_reg = NIX_AF_MDQX_PIR(schq);
break;
}
/* Shaper state toggle needs wait/poll */
if (hw->cap.nix_shaper_toggle_wait) {
if (cir_reg)
handle_txschq_shaper_update(rvu, blkaddr, nixlf,
lvl, cir_reg, 0);
if (pir_reg)
handle_txschq_shaper_update(rvu, blkaddr, nixlf,
lvl, pir_reg, 0);
return;
}
if (!cir_reg)
@@ -1466,6 +1670,7 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
int lvl, int schq)
{
struct rvu_hwinfo *hw = rvu->hw;
int link_level;
int link;
if (lvl >= hw->cap.nix_tx_aggr_lvl)
@@ -1475,7 +1680,9 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
if (lvl == NIX_TXSCH_LVL_TL4)
rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
if (lvl != NIX_TXSCH_LVL_TL2)
link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
if (lvl != link_level)
return;
/* Reset TL2's CGX or LBK link config */
@@ -1484,6 +1691,40 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
int lvl, int schq)
{
struct rvu_hwinfo *hw = rvu->hw;
u64 reg;
/* Skip this if shaping is not supported */
if (!hw->cap.nix_shaping)
return;
/* Clear level specific SW_XOFF */
switch (lvl) {
case NIX_TXSCH_LVL_TL1:
reg = NIX_AF_TL1X_SW_XOFF(schq);
break;
case NIX_TXSCH_LVL_TL2:
reg = NIX_AF_TL2X_SW_XOFF(schq);
break;
case NIX_TXSCH_LVL_TL3:
reg = NIX_AF_TL3X_SW_XOFF(schq);
break;
case NIX_TXSCH_LVL_TL4:
reg = NIX_AF_TL4X_SW_XOFF(schq);
break;
case NIX_TXSCH_LVL_MDQ:
reg = NIX_AF_MDQX_SW_XOFF(schq);
break;
default:
return;
}
rvu_write64(rvu, blkaddr, reg, 0x0);
}
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1661,15 +1902,14 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
int link, blkaddr, rc = 0;
int lvl, idx, start, end;
struct nix_txsch *txsch;
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
u32 *pfvf_map;
int nixlf;
u16 schq;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
if (rc)
return rc;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -1718,7 +1958,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
}
for (idx = 0; idx < req->schq[lvl]; idx++) {
@@ -1727,7 +1967,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
}
}
@@ -1744,7 +1984,7 @@ exit:
return rc;
}
static void nix_smq_flush(struct rvu *rvu, int blkaddr,
static int nix_smq_flush(struct rvu *rvu, int blkaddr,
int smq, u16 pcifunc, int nixlf)
{
int pf = rvu_get_pf(pcifunc);
@@ -1780,6 +2020,7 @@ static void nix_smq_flush(struct rvu *rvu, int blkaddr,
/* restore cgx tx state */
if (restore_tx_en)
cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
return err;
}
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
@@ -1788,6 +2029,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u16 map_func;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1801,19 +2043,36 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
/* Disable TL2/3 queue links before SMQ flush*/
/* Disable TL2/3 queue links and all XOFF's before SMQ flush*/
mutex_lock(&rvu->rsrc_lock);
for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
if (lvl >= hw->cap.nix_tx_aggr_lvl)
continue;
txsch = &nix_hw->txsch[lvl];
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
}
}
nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
nix_get_tx_link(rvu, pcifunc));
/* On PF cleanup, clear cfg done flag as
* PF would have changed default config.
*/
if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
schq = nix_get_tx_link(rvu, pcifunc);
/* Do not clear pcifunc in txsch->pfvf_map[schq] because
* VF might be using this TL1 queue
*/
map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
}
/* Flush SMQs */
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
@@ -1859,6 +2118,7 @@ static int nix_txschq_free_one(struct rvu *rvu,
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u32 *pfvf_map;
int rc;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1883,15 +2143,24 @@ static int nix_txschq_free_one(struct rvu *rvu,
mutex_lock(&rvu->rsrc_lock);
if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
mutex_unlock(&rvu->rsrc_lock);
rc = NIX_AF_ERR_TLX_INVALID;
goto err;
}
/* Clear SW_XOFF of this resource only.
* For SMQ level, all path XOFF's
* need to be made clear by user
*/
nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
/* Flush if it is a SMQ. Onus of disabling
* TL2/3 queue links before SMQ flush is on user
*/
if (lvl == NIX_TXSCH_LVL_SMQ)
nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
if (lvl == NIX_TXSCH_LVL_SMQ &&
nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
rc = NIX_AF_SMQ_FLUSH_FAILED;
goto err;
}
/* Free the resource */
rvu_free_rsrc(&txsch->schq, schq);
@@ -1899,7 +2168,8 @@ static int nix_txschq_free_one(struct rvu *rvu,
mutex_unlock(&rvu->rsrc_lock);
return 0;
err:
return NIX_AF_ERR_TLX_INVALID;
mutex_unlock(&rvu->rsrc_lock);
return rc;
}
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
@@ -1982,6 +2252,11 @@ static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
regbase == NIX_AF_TL4X_PIR(0))
return false;
break;
case NIX_TXSCH_LVL_MDQ:
if (regbase == NIX_AF_MDQX_CIR(0) ||
regbase == NIX_AF_MDQX_PIR(0))
return false;
break;
}
return true;
}
@@ -2014,6 +2289,33 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
/* Register offset - [15:0]
* Scheduler Queue number - [25:16]
*/
#define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0)
static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
int blkaddr, struct nix_txschq_config *req,
struct nix_txschq_config *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int idx, schq;
u64 reg;
for (idx = 0; idx < req->num_regs; idx++) {
reg = req->reg[idx];
reg &= NIX_TX_SCHQ_MASK;
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
!is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
return NIX_AF_INVAL_TXSCHQ_CFG;
rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
}
rsp->lvl = req->lvl;
rsp->num_regs = req->num_regs;
return 0;
}
static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
u16 pcifunc, struct nix_txsch *txsch)
{
@@ -2045,11 +2347,11 @@ static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp)
struct nix_txschq_config *rsp)
{
u64 reg, val, regval, schq_regbase, val_mask;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
u64 reg, regval, schq_regbase;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
int blkaddr, idx, err;
@@ -2068,6 +2370,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
if (req->read)
return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
txsch = &nix_hw->txsch[req->lvl];
pfvf_map = txsch->pfvf_map;
@@ -2082,8 +2387,10 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
for (idx = 0; idx < req->num_regs; idx++) {
reg = req->reg[idx];
reg &= NIX_TX_SCHQ_MASK;
regval = req->regval[idx];
schq_regbase = reg & 0xFFFF;
val_mask = req->regval_mask[idx];
if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
txsch->lvl, reg, regval))
@@ -2093,6 +2400,15 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
if (!is_txschq_shaping_valid(hw, req->lvl, reg))
continue;
val = rvu_read64(rvu, blkaddr, reg);
regval = (val & val_mask) | (regval & ~val_mask);
/* Handle shaping state toggle specially */
if (hw->cap.nix_shaper_toggle_wait &&
handle_txschq_shaper_update(rvu, blkaddr, nixlf,
req->lvl, reg, regval))
continue;
/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
@@ -2133,7 +2449,6 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
&nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
return 0;
}
@@ -2523,14 +2838,19 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
struct nix_mce_list *mce_list;
int pf;
/* skip multicast pkt replication for AF's VFs */
if (is_afvf(pcifunc))
/* skip multicast pkt replication for AF's VFs & SDP links */
if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
return 0;
if (!hw->cap.nix_rx_multicast)
return 0;
pf = rvu_get_pf(pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
return 0;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return -EINVAL;
@@ -3538,6 +3858,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
struct nix_txsch *txsch;
u64 cfg, lmac_fifo_len;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
u8 cgx = 0, lmac = 0;
u16 max_mtu;
@@ -3594,7 +3915,8 @@ rx_frscfg:
link = (cgx * hw->lmac_per_cgx) + lmac;
} else if (pf == 0) {
/* For VFs of PF0 ingress is LBK port, so config LBK link */
link = hw->cgx_links;
pfvf = rvu_get_pfvf(rvu, pcifunc);
link = hw->cgx_links + pfvf->lbkid;
}
if (link < 0)
@@ -4851,3 +5173,36 @@ static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
}
}
int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
struct nix_bandprof_get_hwinfo_rsp *rsp)
{
struct nix_ipolicer *ipolicer;
int blkaddr, layer, err;
struct nix_hw *nix_hw;
u64 tu;
if (!rvu->hw->cap.ipolicer)
return NIX_AF_ERR_IPOLICER_NOTSUPP;
err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
if (err)
return err;
/* Return number of bandwidth profiles free at each layer */
mutex_lock(&rvu->rsrc_lock);
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
if (layer == BAND_PROF_INVAL_LAYER)
continue;
ipolicer = &nix_hw->ipolicer[layer];
rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
}
mutex_unlock(&rvu->rsrc_lock);
/* Set the policer timeunit in nanosec */
tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
rsp->policer_timeunit = (tu + 1) * 100;
return 0;
}
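
The policer_timeunit returned by NIX_BANDPROF_GET_HWINFO above is what lets a
PF/VF driver convert a user-visible rate into per-window token counts. A
hedged arithmetic sketch (variable names are illustrative, not from this
series):

	u64 tu_ns = rsp->policer_timeunit;	/* (tu + 1) * 100, from the AF */
	u64 windows_per_sec = NSEC_PER_SEC / tu_ns;
	/* bytes the profile must credit per policer window for this rate */
	u64 tokens_per_window = div64_u64(rate_bytes_per_sec, windows_per_sec);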
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c

@@ -85,36 +85,6 @@ static int npc_mcam_verify_pf_func(struct rvu *rvu,
return 0;
}
int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel)
{
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
int base = 0, end;
if (is_npc_intf_tx(intf))
return 0;
/* return in case of AF installed rules */
if (is_pffunc_af(pcifunc))
return 0;
if (is_afvf(pcifunc)) {
end = rvu_get_num_lbk_chans();
if (end < 0)
return -EINVAL;
} else {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0x0);
/* CGX mapped functions has maximum of 16 channels */
end = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0xF);
}
if (channel < base || channel > end)
return -EINVAL;
return 0;
}
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
int blkaddr;
@@ -634,8 +604,8 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
struct nix_rx_action action;
int blkaddr, index;
/* AF's VFs work in promiscuous mode */
if (is_afvf(pcifunc))
/* AF's and SDP VFs work in promiscuous mode */
if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -863,7 +833,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u16 vf_func;
/* Only CGX PF/VF can add allmulticast entry */
if (is_afvf(pcifunc))
if (is_afvf(pcifunc) && is_sdp_vf(pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -2706,7 +2676,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 pcifunc = req->hdr.pcifunc;
u16 channel, chan_mask;
int blkaddr, rc;
u8 nix_intf;
@@ -2714,10 +2683,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
if (blkaddr < 0)
return NPC_MCAM_INVALID_REQ;
chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK;
channel &= chan_mask;
mutex_lock(&mcam->lock);
rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
if (rc)
@@ -2739,12 +2704,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
else
nix_intf = pfvf->nix_rx_intf;
if (!is_pffunc_af(pcifunc) &&
npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
rc = NPC_MCAM_INVALID_REQ;
goto exit;
}
if (!is_pffunc_af(pcifunc) &&
npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
rc = NPC_MCAM_INVALID_REQ;
@@ -3091,7 +3050,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 entry = NPC_MCAM_ENTRY_INVALID;
u16 cntr = NPC_MCAM_ENTRY_INVALID;
u16 channel, chan_mask;
int blkaddr, rc;
u8 nix_intf;
@@ -3102,13 +3060,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (!is_npc_interface_valid(rvu, req->intf))
return NPC_MCAM_INVALID_REQ;
chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK;
channel &= chan_mask;
if (npc_mcam_verify_channel(rvu, req->hdr.pcifunc, req->intf, channel))
return NPC_MCAM_INVALID_REQ;
if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
req->hdr.pcifunc))
return NPC_MCAM_INVALID_REQ;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c

@@ -1173,11 +1173,6 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
if (err)
return NPC_FLOW_NOT_SUPPORTED;
/* Skip channel validation if AF is installing */
if (!is_pffunc_af(req->hdr.pcifunc) &&
npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
return NPC_FLOW_CHAN_INVALID;
pfvf = rvu_get_pfvf(rvu, target);
/* PF installing for its VF */
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c

@@ -33,8 +33,8 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
{NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } },
{NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
{0x1200, 0x12E0} } },
{NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
{0x1610, 0x1618} } },
{NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
{0x1610, 0x1618}, {0x1700, 0x17B0} } },
{NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } },
{NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
};
drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c (new file, 108 lines)
@@ -0,0 +1,108 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2021 Marvell.
*
*/
#include <linux/pci.h>
#include "rvu.h"
/* SDP PF device id */
#define PCI_DEVID_OTX2_SDP_PF 0xA0F6
/* Maximum SDP blocks in a chip */
#define MAX_SDP 2
/* SDP PF number */
static int sdp_pf_num[MAX_SDP] = {-1, -1};
bool is_sdp_pfvf(u16 pcifunc)
{
u16 pf = rvu_get_pf(pcifunc);
u32 found = 0, i = 0;
while (i < MAX_SDP) {
if (pf == sdp_pf_num[i])
found = 1;
i++;
}
if (!found)
return false;
return true;
}
bool is_sdp_pf(u16 pcifunc)
{
return (is_sdp_pfvf(pcifunc) &&
!(pcifunc & RVU_PFVF_FUNC_MASK));
}
bool is_sdp_vf(u16 pcifunc)
{
return (is_sdp_pfvf(pcifunc) &&
!!(pcifunc & RVU_PFVF_FUNC_MASK));
}
int rvu_sdp_init(struct rvu *rvu)
{
struct pci_dev *pdev = NULL;
struct rvu_pfvf *pfvf;
u32 i = 0;
while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OTX2_SDP_PF,
pdev)) != NULL) {
/* The RVU PF number is one less than bus number */
sdp_pf_num[i] = pdev->bus->number - 1;
pfvf = &rvu->pf[sdp_pf_num[i]];
pfvf->sdp_info = devm_kzalloc(rvu->dev,
sizeof(struct sdp_node_info),
GFP_KERNEL);
if (!pfvf->sdp_info)
return -ENOMEM;
dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]);
put_device(&pdev->dev);
i++;
}
return 0;
}
int
rvu_mbox_handler_set_sdp_chan_info(struct rvu *rvu,
struct sdp_chan_info_msg *req,
struct msg_rsp *rsp)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
memcpy(pfvf->sdp_info, &req->info, sizeof(struct sdp_node_info));
dev_info(rvu->dev, "AF: SDP%d max_vfs %d num_pf_rings %d pf_srn %d\n",
req->info.node_id, req->info.max_vfs, req->info.num_pf_rings,
req->info.pf_srn);
return 0;
}
int
rvu_mbox_handler_get_sdp_chan_info(struct rvu *rvu, struct msg_req *req,
struct sdp_get_chan_info_msg *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
int blkaddr;
if (!hw->cap.programmable_chans) {
rsp->chan_base = NIX_CHAN_SDP_CH_START;
rsp->num_chan = NIX_CHAN_SDP_NUM_CHANS;
} else {
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
rsp->chan_base = hw->sdp_chan_base;
rsp->num_chan = rvu_read64(rvu, blkaddr, NIX_AF_CONST1) & 0xFFFUL;
}
return 0;
}
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c

@@ -584,25 +584,6 @@ void otx2_get_mac_from_af(struct net_device *netdev)
}
EXPORT_SYMBOL(otx2_get_mac_from_af);
static int otx2_get_link(struct otx2_nic *pfvf)
{
int link = 0;
u16 map;
/* cgx lmac link */
if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) {
map = pfvf->hw.tx_chan_base & 0x7FF;
link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
}
/* LBK channel */
if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE) {
map = pfvf->hw.tx_chan_base & 0x7FF;
link = pfvf->hw.cgx_links | ((map >> 8) & 0xF);
}
return link;
}
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -661,8 +642,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
otx2_get_link(pfvf));
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
/* Enable this queue and backpressure */
req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
@@ -1610,6 +1590,7 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
pfvf->hw.cgx_links = rsp->cgx_links;
pfvf->hw.lbk_links = rsp->lbk_links;
pfvf->hw.tx_link = rsp->tx_link;
}
EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h

@@ -218,6 +218,7 @@ struct otx2_hw {
u64 cgx_fec_uncorr_blks;
u8 cgx_links; /* No. of CGX links present in HW */
u8 lbk_links; /* No. of LBK links present in HW */
u8 tx_link; /* Transmit channel link number */
#define HW_TSO 0
#define CN10K_MBOX 1
#define CN10K_LMTST 2