staging: switch to netif_napi_add_weight()

netif_napi_add() will soon default to a weight of 64 instead of
requiring the user to specify that parameter. Most users already
pass, or should pass, 64. Move the callers in staging to
netif_napi_add_weight(), which will keep the argument.
This way we'll avoid a huge cross-tree conversion when the
argument is removed.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Link: https://lore.kernel.org/r/20220705225801.923601-1-kuba@kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
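
For reference, the conversion pattern looks like the sketch below. The
driver names (foo_*) and the private struct are hypothetical; only the
netif_napi_add_weight() call and NAPI_POLL_WEIGHT are from the kernel API
this commit targets.

#include <linux/netdevice.h>

/* Hypothetical driver private data used only for illustration. */
struct foo_priv {
	struct napi_struct napi;
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	/* ... process up to 'budget' packets, return number handled ... */
	return 0;
}

static void foo_setup_napi(struct net_device *ndev, struct foo_priv *priv)
{
	/*
	 * Old style passed the weight to netif_napi_add() directly:
	 *	netif_napi_add(ndev, &priv->napi, foo_poll, 64);
	 *
	 * Callers that want to keep an explicit weight switch to
	 * netif_napi_add_weight(); once netif_napi_add() loses the
	 * weight parameter it will default to NAPI_POLL_WEIGHT (64).
	 */
	netif_napi_add_weight(ndev, &priv->napi, foo_poll, 64);
}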
parent 234cf3971b
commit 60c85e23be
2 changed files with 4 additions and 4 deletions

drivers/staging/octeon/ethernet-rx.c

@@ -469,8 +469,8 @@ void cvm_oct_rx_initialize(void)
 		if (!(pow_receive_groups & BIT(i)))
 			continue;
 
-		netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
-			       cvm_oct_napi_poll, rx_napi_weight);
+		netif_napi_add_weight(dev_for_napi, &oct_rx_group[i].napi,
+				      cvm_oct_napi_poll, rx_napi_weight);
 		napi_enable(&oct_rx_group[i].napi);
 
 		oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;

drivers/staging/qlge/qlge_main.c

@@ -3037,8 +3037,8 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring
 		/* Inbound completion handling rx_rings run in
 		 * separate NAPI contexts.
 		 */
-		netif_napi_add(qdev->ndev, &rx_ring->napi, qlge_napi_poll_msix,
-			       64);
+		netif_napi_add_weight(qdev->ndev, &rx_ring->napi,
+				      qlge_napi_poll_msix, 64);
 		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
 		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
 	} else {