netdev: replace napi_reschedule with napi_schedule
Now that napi_schedule() returns a bool, we can drop napi_reschedule(),
which does exactly the same thing. The function comes from the very old
commit bfe13f54f5
("ibm_emac: Convert to use napi_struct independent of struct
net_device"), and its original purpose has since been superseded by
different logic.

Convert every user of napi_reschedule to napi_schedule.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Acked-by: Jeff Johnson <quic_jjohnson@quicinc.com> # ath10k
Acked-by: Nick Child <nnac123@linux.ibm.com> # ibm
Acked-by: Marc Kleine-Budde <mkl@pengutronix.de> # for can/dev/rx-offload.c
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://lore.kernel.org/r/20231009133754.9834-3-ansuelsmth@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 73382e919f
parent 0a77900321
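The conversion is mechanical: napi_schedule() now returns true when it actually scheduled the NAPI context (i.e. napi_schedule_prep() succeeded), which is exactly what napi_reschedule() used to report, so callers that tested the return value can test napi_schedule() directly. Below is a minimal sketch of the pattern in a driver poll callback; the my_priv structure and the my_*() helpers are hypothetical, used only to illustrate the shape of the change, and are not part of this patch.

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_process_rx(priv, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		my_irq_enable(priv);

		/* Previously a driver would call napi_reschedule() here;
		 * napi_schedule() now returns the same bool, so it can be
		 * used directly to re-arm polling when more work arrived
		 * after napi_complete_done().
		 */
		if (my_work_pending(priv) && napi_schedule(napi))
			my_irq_disable(priv);
	}

	return work_done;
}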
@@ -488,7 +488,7 @@ poll_more:
 		if (unlikely(ib_req_notify_cq(priv->recv_cq,
 					      IB_CQ_NEXT_COMP |
 					      IB_CQ_REPORT_MISSED_EVENTS)) &&
-		    napi_reschedule(napi))
+		    napi_schedule(napi))
 			goto poll_more;
 	}
 
@@ -518,7 +518,7 @@ poll_more:
 		napi_complete(napi);
 		if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
 					      IB_CQ_REPORT_MISSED_EVENTS)) &&
-		    napi_reschedule(napi))
+		    napi_schedule(napi))
 			goto poll_more;
 	}
 	return n < 0 ? 0 : n;
@@ -67,7 +67,7 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
 
 		/* Check if there was another interrupt */
 		if (!skb_queue_empty(&offload->skb_queue))
-			napi_reschedule(&offload->napi);
+			napi_schedule(&offload->napi);
 	}
 
 	return work_done;
@@ -4261,7 +4261,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
 
 			if (fl_starving(adap, fl)) {
 				rxq = container_of(fl, struct sge_eth_rxq, fl);
-				if (napi_reschedule(&rxq->rspq.napi))
+				if (napi_schedule(&rxq->rspq.napi))
 					fl->starving++;
 				else
 					set_bit(id, s->starving_fl);
@@ -2094,7 +2094,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
 			struct sge_eth_rxq *rxq;
 
 			rxq = container_of(fl, struct sge_eth_rxq, fl);
-			if (napi_reschedule(&rxq->rspq.napi))
+			if (napi_schedule(&rxq->rspq.napi))
 				fl->starving++;
 			else
 				set_bit(id, s->starving_fl);
@@ -198,7 +198,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 		 */
 		if (nps_enet_is_tx_pending(priv)) {
 			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
-			napi_reschedule(napi);
+			napi_schedule(napi);
 		}
 	}
 
@@ -281,7 +281,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
 		if (block->rx)
 			reschedule |= gve_rx_work_pending(block->rx);
 
-		if (reschedule && napi_reschedule(napi))
+		if (reschedule && napi_schedule(napi))
 			iowrite32be(GVE_IRQ_MASK, irq_doorbell);
 	}
 	return work_done;
@@ -900,7 +900,7 @@ static int ehea_poll(struct napi_struct *napi, int budget)
 		if (!cqe && !cqe_skb)
 			return rx;
 
-		if (!napi_reschedule(napi))
+		if (!napi_schedule(napi))
 			return rx;
 
 		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
@@ -442,7 +442,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
 		if (unlikely(mc->ops->peek_rx(mc->dev) ||
 			     test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
 			MAL_DBG2(mal, "rotting packet" NL);
-			if (!napi_reschedule(napi))
+			if (!napi_schedule(napi))
 				goto more_work;
 
 			spin_lock_irqsave(&mal->lock, flags);
@@ -1432,7 +1432,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 		BUG_ON(lpar_rc != H_SUCCESS);
 
 		if (ibmveth_rxq_pending_buffer(adapter) &&
-		    napi_reschedule(napi)) {
+		    napi_schedule(napi)) {
 			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 					       VIO_IRQ_DISABLE);
 		}
@@ -3519,7 +3519,7 @@ restart_poll:
 	if (napi_complete_done(napi, frames_processed)) {
 		enable_scrq_irq(adapter, rx_scrq);
 		if (pending_scrq(adapter, rx_scrq)) {
-			if (napi_reschedule(napi)) {
+			if (napi_schedule(napi)) {
 				disable_scrq_irq(adapter, rx_scrq);
 				goto restart_poll;
 			}
@@ -400,7 +400,7 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
 		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
 			local_bh_disable();
-			napi_reschedule(&priv->rx_cq[ring]->napi);
+			napi_schedule(&priv->rx_cq[ring]->napi);
 			local_bh_enable();
 		}
 	}
@@ -683,7 +683,7 @@ static int nixge_poll(struct napi_struct *napi, int budget)
 		if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
 			/* If there's more, reschedule, but clear */
 			nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
-			napi_reschedule(napi);
+			napi_schedule(napi);
 		} else {
 			/* if not, turn on RX IRQs again ... */
 			cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
@@ -802,7 +802,7 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
 		stmmac_start_rx(priv, priv->ioaddr, i);
 
 		local_bh_disable();
-		napi_reschedule(&ch->rx_napi);
+		napi_schedule(&ch->rx_napi);
 		local_bh_enable();
 	}
 
@@ -723,9 +723,9 @@ static int eth_poll(struct napi_struct *napi, int budget)
 			napi_complete(napi);
 			qmgr_enable_irq(rxq);
 			if (!qmgr_stat_below_low_watermark(rxq) &&
-			    napi_reschedule(napi)) { /* not empty again */
+			    napi_schedule(napi)) { /* not empty again */
 #if DEBUG_RX
-				netdev_debug(dev, "eth_poll napi_reschedule succeeded\n");
+				netdev_debug(dev, "eth_poll napi_schedule succeeded\n");
 #endif
 				qmgr_disable_irq(rxq);
 				continue;
@@ -1030,7 +1030,7 @@ static int fjes_poll(struct napi_struct *napi, int budget)
 		}
 
 		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
-			napi_reschedule(napi);
+			napi_schedule(napi);
 		} else {
 			spin_lock(&hw->rx_status_lock);
 			for (epidx = 0; epidx < hw->max_epid; epidx++) {
@@ -687,10 +687,10 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
 			napi_complete(napi);
 			qmgr_enable_irq(rxq);
 			if (!qmgr_stat_empty(rxq) &&
-			    napi_reschedule(napi)) {
+			    napi_schedule(napi)) {
 #if DEBUG_RX
 				printk(KERN_DEBUG "%s: hss_hdlc_poll"
-				       " napi_reschedule succeeded\n",
+				       " napi_schedule succeeded\n",
 				       dev->name);
 #endif
 				qmgr_disable_irq(rxq);
@@ -3148,7 +3148,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
 		 * immediate servicing.
 		 */
 		if (ath10k_ce_interrupt_summary(ar)) {
-			napi_reschedule(ctx);
+			napi_schedule(ctx);
 			goto out;
 		}
 		ath10k_pci_enable_legacy_irq(ar);
@@ -852,7 +852,7 @@ int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
 	if (!ret) {
 		napi_complete_done(napi, work_done);
 		rxq->sleep_lock_pending = true;
-		napi_reschedule(napi);
+		napi_schedule(napi);
 		return work_done;
 	}
 
@@ -516,16 +516,6 @@ static inline void napi_schedule_irqoff(struct napi_struct *n)
 		__napi_schedule_irqoff(n);
 }
 
-/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
-static inline bool napi_reschedule(struct napi_struct *napi)
-{
-	if (napi_schedule_prep(napi)) {
-		__napi_schedule(napi);
-		return true;
-	}
-	return false;
-}
-
 /**
  * napi_complete_done - NAPI processing complete
  * @n: NAPI context