chelsio: NAPI speed improvement

Speed up and clean up the receive processing by eliminating the
MMIO read and a lock round trip.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Author:    Stephen Hemminger <shemminger@osdl.org>
Date:      2007-01-08 11:26:30 -08:00
Committer: Jeff Garzik <jeff@garzik.org>
commit     3de00b8958
parent     4d22de3e6c
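The key idea of the patch is the new responses_pending() helper: instead of a slow, serializing MMIO read of the interrupt cause register, the driver checks the generation bit of the next response-queue entry, which the NIC DMAs into ordinary host memory and toggles on each full pass over the ring. Below is a minimal, self-contained sketch of that generation-bit ring pattern; the types and names (resp_entry, resp_queue, consume_one) are illustrative stand-ins, not the driver's own:

#include <stdio.h>

/* Hypothetical response-queue entry: the NIC writes entries into host
 * memory and toggles GenerationBit on each full pass over the ring. */
struct resp_entry {
	unsigned int GenerationBit : 1;
	unsigned int DataValid : 1;
};

struct resp_queue {
	struct resp_entry entries[8];
	unsigned int cidx;	/* consumer index */
	unsigned int genbit;	/* generation expected on this pass */
	unsigned int size;
};

/* Same test as the patch's responses_pending(): one ordinary memory
 * load replaces an MMIO read of the interrupt cause register. */
static int responses_pending(const struct resp_queue *q)
{
	return q->entries[q->cidx].GenerationBit == q->genbit;
}

/* Consume one entry; flip the expected generation bit on wrap-around. */
static void consume_one(struct resp_queue *q)
{
	if (++q->cidx == q->size) {
		q->cidx = 0;
		q->genbit ^= 1;
	}
}

int main(void)
{
	struct resp_queue q = { .size = 8, .genbit = 1 };
	int i;

	/* Pretend the hardware delivered three responses this pass. */
	for (i = 0; i < 3; i++)
		q.entries[i].GenerationBit = 1;

	while (responses_pending(&q)) {
		printf("processing entry %u\n", q.cidx);
		consume_one(&q);
	}
	return 0;
}

Because the entry lives in cacheable memory, the pending check costs a load rather than a PCI read; that read, plus the async_lock acquisition, is the round trip the commit message refers to.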

--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c

@@ -1559,6 +1559,14 @@ static int process_responses(struct adapter *adapter, int budget)
 	return budget;
 }
 
+static inline int responses_pending(const struct adapter *adapter)
+{
+	const struct respQ *Q = &adapter->sge->respQ;
+	const struct respQ_e *e = &Q->entries[Q->cidx];
+
+	return (e->GenerationBit == Q->genbit);
+}
+
 #ifdef CONFIG_CHELSIO_T1_NAPI
 /*
  * A simpler version of process_responses() that handles only pure (i.e.,
@@ -1568,13 +1576,16 @@ static int process_responses(struct adapter *adapter, int budget)
  * which the caller must ensure is a valid pure response.  Returns 1 if it
  * encounters a valid data-carrying response, 0 otherwise.
  */
-static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
+static int process_pure_responses(struct adapter *adapter)
 {
 	struct sge *sge = adapter->sge;
 	struct respQ *q = &sge->respQ;
+	struct respQ_e *e = &q->entries[q->cidx];
 	unsigned int flags = 0;
 	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
 
+	if (e->DataValid)
+		return 1;
+
 	do {
 		flags |= e->Qsleeping;
@@ -1610,23 +1621,20 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
 int t1_poll(struct net_device *dev, int *budget)
 {
 	struct adapter *adapter = dev->priv;
-	int effective_budget = min(*budget, dev->quota);
-	int work_done = process_responses(adapter, effective_budget);
+	int work_done;
 
+	work_done = process_responses(adapter, min(*budget, dev->quota));
 	*budget -= work_done;
 	dev->quota -= work_done;
 
-	if (work_done >= effective_budget)
+	if (unlikely(responses_pending(adapter)))
 		return 1;
 
-	spin_lock_irq(&adapter->async_lock);
-	__netif_rx_complete(dev);
+	netif_rx_complete(dev);
 	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
-	       adapter->regs + A_PL_ENABLE);
-	spin_unlock_irq(&adapter->async_lock);
+
 	return 0;
 }
 
 /*
@@ -1635,44 +1643,33 @@ int t1_poll(struct net_device *dev, int *budget)
 irqreturn_t t1_interrupt(int irq, void *data)
 {
 	struct adapter *adapter = data;
-	struct net_device *dev = adapter->sge->netdev;
 	struct sge *sge = adapter->sge;
-	u32 cause;
-	int handled = 0;
+	int handled;
 
-	cause = readl(adapter->regs + A_PL_CAUSE);
-	if (cause == 0 || cause == ~0)
-		return IRQ_NONE;
+	if (likely(responses_pending(adapter))) {
+		struct net_device *dev = sge->netdev;
 
-	spin_lock(&adapter->async_lock);
-	if (cause & F_PL_INTR_SGE_DATA) {
-		struct respQ *q = &adapter->sge->respQ;
-		struct respQ_e *e = &q->entries[q->cidx];
-
-		handled = 1;
-		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
-
-		if (e->GenerationBit == q->genbit &&
-		    __netif_rx_schedule_prep(dev)) {
-			if (e->DataValid || process_pure_responses(adapter, e)) {
-				/* mask off data IRQ */
-				writel(adapter->slow_intr_mask,
-				       adapter->regs + A_PL_ENABLE);
-				__netif_rx_schedule(sge->netdev);
-				goto unlock;
-			}
-			/* no data, no NAPI needed */
-			netif_poll_enable(dev);
-		}
-		writel(q->cidx, adapter->regs + A_SG_SLEEPING);
-	} else
-		handled = t1_slow_intr_handler(adapter);
+		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+		if (__netif_rx_schedule_prep(dev)) {
+			if (process_pure_responses(adapter))
+				__netif_rx_schedule(dev);
+			else {
+				/* no data, no NAPI needed */
+				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
+				netif_poll_enable(dev);	/* undo schedule_prep */
+			}
+		}
+		return IRQ_HANDLED;
+	}
+
+	spin_lock(&adapter->async_lock);
+	handled = t1_slow_intr_handler(adapter);
+	spin_unlock(&adapter->async_lock);
 
 	if (!handled)
 		sge->stats.unhandled_irqs++;
-unlock:
-	spin_unlock(&adapter->async_lock);
+
 	return IRQ_RETVAL(handled != 0);
 }
@@ -1695,17 +1692,13 @@ unlock:
 irqreturn_t t1_interrupt(int irq, void *cookie)
 {
 	int work_done;
-	struct respQ_e *e;
 	struct adapter *adapter = cookie;
-	struct respQ *Q = &adapter->sge->respQ;
 
 	spin_lock(&adapter->async_lock);
-	e = &Q->entries[Q->cidx];
-	prefetch(e);
-
 	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
 
-	if (likely(e->GenerationBit == Q->genbit))
+	if (likely(responses_pending(adapter)))
 		work_done = process_responses(adapter, -1);
 	else
 		work_done = t1_slow_intr_handler(adapter);
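For reference, the patched NAPI interrupt handler above follows a common fast-path pattern: classify the interrupt with a plain memory read, claim the poll with __netif_rx_schedule_prep(), drain pure (data-less) responses right in hard-IRQ context, and hand off to the NAPI poll only when a data-carrying response appears. A rough sketch of that control flow with stand-in stubs (the canned return values and printfs are illustrative only; the real calls are in the diff above, and the kernel API names differ slightly):

#include <stdio.h>

/* Stand-in stubs with canned return values for illustration. */
static int responses_pending(void)      { return 1; }  /* entry is ready */
static int rx_schedule_prep(void)       { return 1; }  /* poll not yet armed */
static int process_pure_responses(void) { return 1; }  /* saw a data response */
static void rx_schedule(void)      { printf("schedule NAPI poll\n"); }
static void poll_enable(void)      { printf("undo schedule_prep\n"); }
static void ack_data_irq(void)     { printf("ack SGE data interrupt\n"); }
static int slow_intr_handler(void) { printf("slow path\n"); return 1; }

/* Mirrors the patched t1_interrupt(): the common case takes no lock
 * and performs no MMIO read to classify the interrupt. */
static int t1_interrupt_sketch(void)
{
	if (responses_pending()) {	/* plain memory read */
		ack_data_irq();
		if (rx_schedule_prep()) {
			if (process_pure_responses())
				rx_schedule();	/* data pending: defer to NAPI */
			else
				poll_enable();	/* pure only: handled here */
		}
		return 1;	/* IRQ_HANDLED */
	}
	return slow_intr_handler();	/* error/status interrupts */
}

int main(void)
{
	return !t1_interrupt_sketch();
}

The async_lock is left guarding only the rare slow path, which is how the patch removes a lock round trip from every data interrupt.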