Merge branch 'bnxt_en-msix-improvements'

Michael Chan says:

====================
bnxt_en: MSIX improvements

This patchset makes some improvements related to MSIX.  The first
patch adjusts the default number of MSIX vectors assigned for RoCE.
On the PF, the number of MSIX vectors is increased to 64 from the
current 9.  The second patch allocates additional MSIX vectors ahead
of time when changing ethtool channels if dynamic MSIX is supported.
The third patch makes sure that the IRQ name is not truncated.
====================

Link: https://patch.msgid.link/20240909202737.93852-1-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit e35b0515bb
Committed by Jakub Kicinski on 2024-09-10 18:42:48 -07:00
5 changed files with 42 additions and 13 deletions

drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -13803,6 +13803,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 	int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
 	struct bnxt_hw_rings hwr = {0};
 	int rx_rings = rx;
+	int rc;
 
 	if (tcs)
 		tx_sets = tcs;
@@ -13835,7 +13836,23 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 	}
 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 		hwr.cp_p5 = hwr.tx + rx;
-	return bnxt_hwrm_check_rings(bp, &hwr);
+	rc = bnxt_hwrm_check_rings(bp, &hwr);
+	if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
+		if (!bnxt_ulp_registered(bp->edev)) {
+			hwr.cp += bnxt_get_ulp_msix_num(bp);
+			hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
+		}
+		if (hwr.cp > bp->total_irqs) {
+			int total_msix = bnxt_change_msix(bp, hwr.cp);
+
+			if (total_msix < hwr.cp) {
+				netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
+					    hwr.cp, total_msix);
+				rc = -ENOSPC;
+			}
+		}
+	}
+	return rc;
 }
 
 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)

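The helper bnxt_change_msix() called above is not part of this diff.  For
context, it builds on the PCI core's dynamic MSI-X interface, the same one
bnxt_check_rings() now probes with pci_msix_can_alloc_dyn(), which lets a
driver add vectors one at a time after the initial allocation.  A minimal
sketch of that pattern, assuming a hypothetical helper grow_msix() and
counters have/want that are not bnxt symbols:

#include <linux/msi.h>
#include <linux/pci.h>

/* Hypothetical helper, not a bnxt function: try to grow an MSI-X
 * allocation from "have" to "want" vectors using dynamic MSI-X.
 * Returns how many vectors are available afterwards.
 */
static int grow_msix(struct pci_dev *pdev, int have, int want)
{
	if (!pci_msix_can_alloc_dyn(pdev))
		return have;		/* only static allocation possible */

	while (have < want) {
		struct msi_map map;

		/* Let the PCI core pick any free MSI-X table entry. */
		map = pci_msix_alloc_irq_at(pdev, MSI_ANY_INDEX, NULL);
		if (map.index < 0)
			return have;	/* hardware or firmware limit reached */
		have++;
	}
	return have;
}

Returning the number of vectors actually obtained mirrors how
bnxt_change_msix() can come back with fewer than hwr.cp, which the hunk
above turns into -ENOSPC.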
drivers/net/ethernet/broadcom/bnxt/bnxt.h

@@ -1217,12 +1217,15 @@ struct bnxt_napi {
 	bool			in_reset;
 };
 
+/* "TxRx", 2 hyphens, plus maximum integer */
+#define BNXT_IRQ_NAME_EXTRA	17
+
 struct bnxt_irq {
 	irq_handler_t	handler;
 	unsigned int	vector;
 	u8		requested:1;
 	u8		have_cpumask:1;
-	char		name[IFNAMSIZ + 2];
+	char		name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
 	cpumask_var_t	cpu_mask;
 };

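The 17 extra bytes line up with the "%s-%s-%d" style name the driver builds
for each vector, for example "eth0-TxRx-3": "TxRx" contributes 4 characters,
the two hyphens 2, a ring index formatted as an int can take up to 10
digits, and 1 byte remains for the NUL terminator.  A small stand-alone
sketch of that arithmetic, with demo_irq_name() as an illustrative helper
rather than a driver function:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ		16	/* kernel limit, includes the NUL */
#define BNXT_IRQ_NAME_EXTRA	17	/* "TxRx" + 2 hyphens + 10 digits + NUL */

/* Illustrative only: builds a name the same way the driver formats
 * its per-ring IRQ names ("<netdev>-TxRx-<ring>").
 */
static void demo_irq_name(char *buf, size_t len, const char *netdev, int ring)
{
	snprintf(buf, len, "%s-%s-%d", netdev, "TxRx", ring);
}

int main(void)
{
	char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];

	/* Worst case: 15-character interface name and INT_MAX ring index. */
	demo_irq_name(name, sizeof(name), "enp129s0f0np12", 2147483647);
	printf("%s uses %zu of %zu bytes\n", name, strlen(name) + 1, sizeof(name));
	return 0;
}

With the old IFNAMSIZ + 2 sizing, a long interface name plus the
"-TxRx-<n>" suffix would have been silently truncated by snprintf(), which
is what the cover letter's third patch is fixing.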
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c

@@ -955,11 +955,6 @@ static int bnxt_set_channels(struct net_device *dev,
 		}
 		tx_xdp = req_rx_rings;
 	}
-	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
-	if (rc) {
-		netdev_warn(dev, "Unable to allocate the requested rings\n");
-		return rc;
-	}
 
 	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
 	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
@@ -968,6 +963,12 @@ static int bnxt_set_channels(struct net_device *dev,
 		return -EINVAL;
 	}
 
+	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
+	if (rc) {
+		netdev_warn(dev, "Unable to allocate the requested rings\n");
+		return rc;
+	}
+
 	if (netif_running(dev)) {
 		if (BNXT_PF(bp)) {
 			/* TODO CHIMP_FW: Send message to all VF's

drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c

@@ -176,11 +176,17 @@ EXPORT_SYMBOL(bnxt_unregister_dev);
 
 static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
 {
-	u32 roce_msix = BNXT_VF(bp) ?
-			BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
+	int roce_msix = BNXT_MAX_ROCE_MSIX;
 
-	return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
-		min_t(u32, roce_msix, num_online_cpus()) : 0);
+	if (BNXT_VF(bp))
+		roce_msix = BNXT_MAX_ROCE_MSIX_VF;
+	else if (bp->port_partition_type)
+		roce_msix = BNXT_MAX_ROCE_MSIX_NPAR_PF;
+
+	/* NQ MSIX vectors should match the number of CPUs plus 1 more for
+	 * the CREQ MSIX, up to the default.
+	 */
+	return min_t(int, roce_msix, num_online_cpus() + 1);
 }
 
 int bnxt_send_msg(struct bnxt_en_dev *edev,

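As a worked example of the new default, here is a hedged userspace
restatement of the min_t() above; dflt_roce_msix() and its flag parameters
are illustrative, not driver symbols.  A PF with 16 online CPUs now gets
min(64, 16 + 1) = 17 RoCE MSIX instead of being capped at the old 9, an
NPAR PF is capped at 5, and a VF at 2.

#include <stdio.h>

#define BNXT_MAX_ROCE_MSIX_VF		2
#define BNXT_MAX_ROCE_MSIX_NPAR_PF	5
#define BNXT_MAX_ROCE_MSIX		64

/* Illustrative restatement: one NQ MSIX per online CPU plus one for
 * the CREQ, clamped to the per-function cap.
 */
static int dflt_roce_msix(int is_vf, int is_npar_pf, int online_cpus)
{
	int cap = BNXT_MAX_ROCE_MSIX;

	if (is_vf)
		cap = BNXT_MAX_ROCE_MSIX_VF;
	else if (is_npar_pf)
		cap = BNXT_MAX_ROCE_MSIX_NPAR_PF;

	return online_cpus + 1 < cap ? online_cpus + 1 : cap;
}

int main(void)
{
	/* PF with 16 CPUs: min(64, 17) = 17; previously capped at 9. */
	printf("PF, 16 CPUs   -> %d\n", dflt_roce_msix(0, 0, 16));
	/* NPAR PF with 16 CPUs: min(5, 17) = 5. */
	printf("NPAR, 16 CPUs -> %d\n", dflt_roce_msix(0, 1, 16));
	/* VF with 8 CPUs: min(2, 9) = 2. */
	printf("VF, 8 CPUs    -> %d\n", dflt_roce_msix(1, 0, 8));
	return 0;
}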
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h

@@ -15,8 +15,10 @@
 #define BNXT_MIN_ROCE_CP_RINGS	2
 #define BNXT_MIN_ROCE_STAT_CTXS	1
-#define BNXT_MAX_ROCE_MSIX	9
-#define BNXT_MAX_VF_ROCE_MSIX	2
+#define BNXT_MAX_ROCE_MSIX_VF		2
+#define BNXT_MAX_ROCE_MSIX_NPAR_PF	5
+#define BNXT_MAX_ROCE_MSIX		64
 
 struct hwrm_async_event_cmpl;
 struct bnxt;