mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 12:28:41 +08:00
net: use symbolic values for ndo_start_xmit() return codes
Convert magic values 1 and -1 to NETDEV_TX_BUSY and NETDEV_TX_LOCKED respectively. 0 (NETDEV_TX_OK) is not changed to keep the noise down, except in very few cases where its in direct proximity to one of the other values. Signed-off-by: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
5b2c4b972c
commit
5b54814022
@ -1069,7 +1069,7 @@ isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
|
||||
lp = isdn_net_get_locked_lp(nd);
|
||||
if (!lp) {
|
||||
printk(KERN_WARNING "%s: all channels busy - requeuing!\n", ndev->name);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
/* we have our lp locked from now on */
|
||||
|
||||
@ -1273,14 +1273,14 @@ isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
spin_unlock_irqrestore(&dev->lock, flags);
|
||||
isdn_net_dial(); /* Initiate dialing */
|
||||
netif_stop_queue(ndev);
|
||||
return 1; /* let upper layer requeue skb packet */
|
||||
return NETDEV_TX_BUSY; /* let upper layer requeue skb packet */
|
||||
}
|
||||
#endif
|
||||
/* Initiate dialing */
|
||||
spin_unlock_irqrestore(&dev->lock, flags);
|
||||
isdn_net_dial();
|
||||
isdn_net_device_stop_queue(lp);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
} else {
|
||||
isdn_net_unreachable(ndev, skb,
|
||||
"No phone number");
|
||||
|
@ -703,7 +703,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
printk (KERN_ERR "%s: no tx context available: %u\n",
|
||||
__func__, priv->mpt_txfidx_tail);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
mf = mpt_get_msg_frame(LanCtx, mpt_dev);
|
||||
@ -713,7 +713,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
printk (KERN_ERR "%s: Unable to alloc request frame\n",
|
||||
__func__);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
|
||||
|
@ -1088,7 +1088,7 @@ static int elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
pr_debug("%s: failed to transmit packet\n", dev->name);
|
||||
}
|
||||
spin_unlock_irqrestore(&adapter->lock, flags);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
if (elp_debug >= 3)
|
||||
pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len);
|
||||
|
@ -1014,7 +1014,7 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
|
||||
int i;
|
||||
|
||||
if (vp->tx_full) /* No room to transmit with */
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
if (vp->cur_tx != 0)
|
||||
prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE];
|
||||
else
|
||||
|
@ -1030,7 +1030,7 @@ static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
|
||||
netif_stop_queue(dev);
|
||||
|
||||
if(atomic_read(&lp->tx_count)==0) {
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
if (skb_padto(skb, ETH_ZLEN)) {
|
||||
|
@ -2107,7 +2107,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
|
||||
dev->name);
|
||||
netif_stop_queue(dev);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
vp->tx_skbuff[entry] = skb;
|
||||
|
@ -541,7 +541,7 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
|
||||
unsigned long flags;
|
||||
|
||||
if (!TX_BUFFS_AVAIL)
|
||||
return -1;
|
||||
return NETDEV_TX_LOCKED;
|
||||
|
||||
netif_stop_queue (dev);
|
||||
|
||||
|
@ -756,7 +756,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
|
||||
spin_unlock_irqrestore(&cp->lock, intr_flags);
|
||||
pr_err(PFX "%s: BUG! Tx Ring full when queue awake!\n",
|
||||
dev->name);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
#if CP_VLAN_TAG_USED
|
||||
|
@ -564,7 +564,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
if (!TX_BUFFS_AVAIL){
|
||||
local_irq_restore(flags);
|
||||
return -1;
|
||||
return NETDEV_TX_LOCKED;
|
||||
}
|
||||
|
||||
#ifdef DEBUG_DRIVER
|
||||
|
@ -829,7 +829,7 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
dev->trans_start = jiffies;
|
||||
} else {
|
||||
printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
|
||||
return 1; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
|
||||
return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
|
||||
on this skb, he also reports -ENETDOWN and printk's, so either
|
||||
we free and return(0) or don't free and return 1 */
|
||||
}
|
||||
|
@ -526,7 +526,7 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
if (priv(dev)->tx_tail == next_ptr) {
|
||||
local_irq_restore(flags);
|
||||
return 1; /* unable to queue */
|
||||
return NETDEV_TX_BUSY; /* unable to queue */
|
||||
}
|
||||
|
||||
dev->trans_start = jiffies;
|
||||
|
@ -957,7 +957,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
/* We've wrapped around and the transmitter is still busy */
|
||||
netif_stop_queue(dev);
|
||||
aup->tx_full = 1;
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
else if (buff_stat & TX_T_DONE) {
|
||||
update_tx_stats(dev, ptxd->status);
|
||||
|
@ -2934,7 +2934,7 @@ static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
* individual queues.
|
||||
*/
|
||||
if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
dev->trans_start = jiffies;
|
||||
return 0;
|
||||
}
|
||||
|
@ -1551,7 +1551,7 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
spin_unlock_irq(&lp->lock);
|
||||
if (net_debug) printk("cs89x0: Tx buffer not free!\n");
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
/* Write the contents of the packet */
|
||||
writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
|
||||
|
@ -168,14 +168,14 @@ static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
|
||||
tickssofar = jiffies - dev->trans_start;
|
||||
if (tickssofar < 5)
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
/* else */
|
||||
printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
|
||||
/* Restart the adapter. */
|
||||
spin_lock_irqsave(&de600_lock, flags);
|
||||
if (adapter_init(dev)) {
|
||||
spin_unlock_irqrestore(&de600_lock, flags);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
spin_unlock_irqrestore(&de600_lock, flags);
|
||||
}
|
||||
@ -199,7 +199,7 @@ static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) {
|
||||
if (adapter_init(dev)) {
|
||||
spin_unlock_irqrestore(&de600_lock, flags);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -531,7 +531,7 @@ static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
case (TXBF0 | TXBF1): /* NONE!!! */
|
||||
printk(KERN_WARNING "%s: No tx-buffer available!\n", dev->name);
|
||||
spin_unlock_irqrestore(&de620_lock, flags);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
de620_write_block(dev, buffer, skb->len, len-skb->len);
|
||||
|
||||
|
@ -3318,7 +3318,7 @@ static int dfx_xmt_queue_pkt(
|
||||
{
|
||||
skb_pull(skb,3);
|
||||
spin_unlock_irqrestore(&bp->lock, flags);
|
||||
return(1); /* requeue packet for later */
|
||||
return NETDEV_TX_BUSY; /* requeue packet for later */
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -957,7 +957,7 @@ static int depca_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
if (TX_BUFFS_AVAIL)
|
||||
netif_start_queue(dev);
|
||||
} else
|
||||
status = -1;
|
||||
status = NETDEV_TX_LOCKED;
|
||||
|
||||
out:
|
||||
return status;
|
||||
@ -1839,7 +1839,7 @@ static int load_packet(struct net_device *dev, struct sk_buff *skb)
|
||||
|
||||
lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */
|
||||
} else {
|
||||
status = -1;
|
||||
status = NETDEV_TX_LOCKED;
|
||||
}
|
||||
|
||||
return status;
|
||||
|
@ -756,7 +756,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
dm9000_dbg(db, 3, "%s:\n", __func__);
|
||||
|
||||
if (db->tx_pkt_cnt > 1)
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
|
||||
spin_lock_irqsave(&db->lock, flags);
|
||||
|
||||
|
@ -1716,7 +1716,7 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
||||
/* This is a hard error - log it. */
|
||||
DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
|
||||
netif_stop_queue(netdev);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
netdev->trans_start = jiffies;
|
||||
|
@ -873,7 +873,7 @@ static int ewrk3_queue_pkt (struct sk_buff *skb, struct net_device *dev)
|
||||
err_out:
|
||||
ENABLE_IRQs;
|
||||
spin_unlock_irq (&lp->hw_lock);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -290,7 +290,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
if (!fep->link) {
|
||||
/* Link is down or autonegotiation is in progress. */
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&fep->hw_lock, flags);
|
||||
@ -305,7 +305,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
*/
|
||||
printk("%s: tx queue full!.\n", dev->name);
|
||||
spin_unlock_irqrestore(&fep->hw_lock, flags);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
/* Clear all of the status flags */
|
||||
|
@ -1280,7 +1280,7 @@ static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
status=readw(hmp->base + TxStatus);
|
||||
if( !(status & 0x0001) || (status & 0x0002))
|
||||
writew(0x0001, hmp->base + TxCmd);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
/* Caution: the write order is important here, set the field
|
||||
|
@ -777,7 +777,7 @@ static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
|
||||
return 0;
|
||||
}
|
||||
if (bc->skb)
|
||||
return -1;
|
||||
return NETDEV_TX_LOCKED;
|
||||
/* strip KISS byte */
|
||||
if (skb->len >= HDLCDRV_MAXFLEN+1 || skb->len < 3) {
|
||||
dev_kfree_skb(skb);
|
||||
|
@ -409,7 +409,7 @@ static int hdlcdrv_send_packet(struct sk_buff *skb, struct net_device *dev)
|
||||
return 0;
|
||||
}
|
||||
if (sm->skb)
|
||||
return -1;
|
||||
return NETDEV_TX_LOCKED;
|
||||
netif_stop_queue(dev);
|
||||
sm->skb = skb;
|
||||
return 0;
|
||||
|
@ -531,7 +531,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
if (!netif_running(dev)) {
|
||||
printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
if (netif_queue_stopped(dev)) {
|
||||
@ -541,7 +541,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
*/
|
||||
if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
|
||||
/* 20 sec timeout not reached */
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name,
|
||||
|
@ -1484,7 +1484,7 @@ static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
|
||||
stop_queue:
|
||||
netif_stop_queue(ndev);
|
||||
DBG2(dev, "stopped TX queue" NL);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
/* Tx lock BHs */
|
||||
|
@ -512,13 +512,13 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
printk(KERN_DEBUG "%s: tx_full\n", dev->name);
|
||||
netif_stop_queue(dev);
|
||||
aup->tx_full = 1;
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
|
||||
printk(KERN_DEBUG "%s: tx_full\n", dev->name);
|
||||
netif_stop_queue(dev);
|
||||
aup->tx_full = 1;
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
pDB = aup->tx_db_inuse[aup->tx_head];
|
||||
|
@ -607,7 +607,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
* stopped so the network layer will retry after the
|
||||
* fsm completes and wakes the queue.
|
||||
*/
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
else if (unlikely(err)) {
|
||||
/* other fatal error - forget the speed change and
|
||||
|
@ -370,7 +370,7 @@ static int __ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
spin_unlock(&ei_local->page_lock);
|
||||
enable_irq_lockdep_irqrestore(dev->irq, &flags);
|
||||
dev->stats.tx_errors++;
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -400,7 +400,7 @@ net_send_packet(struct sk_buff *skb, struct net_device *dev)
|
||||
/* Gasp! It hasn't. But that shouldn't happen since
|
||||
we're waiting for TxOk, so return 1 and requeue this packet. */
|
||||
local_irq_restore(flags);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
/* Write the contents of the packet */
|
||||
|
@ -645,7 +645,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
"BUG! Tx Ring full when queue awake!\n");
|
||||
dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
|
||||
bp->tx_head, bp->tx_tail);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
entry = bp->tx_head;
|
||||
|
@ -547,7 +547,7 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
|
||||
netif_stop_queue(dev);
|
||||
mp->tx_fullup = 1;
|
||||
spin_unlock_irqrestore(&mp->lock, flags);
|
||||
return 1; /* can't take it at the moment */
|
||||
return NETDEV_TX_BUSY; /* can't take it at the moment */
|
||||
}
|
||||
spin_unlock_irqrestore(&mp->lock, flags);
|
||||
|
||||
|
@ -2687,7 +2687,7 @@ again:
|
||||
/* we are out of transmit resources */
|
||||
tx->stop_queue++;
|
||||
netif_tx_stop_queue(netdev_queue);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
/* Setup checksum offloading, if needed */
|
||||
|
@ -640,7 +640,7 @@ static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
if (!TX_BUFFS_AVAIL(head, tail)) {
|
||||
DTX(("no buffs available, returning 1\n"));
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&mp->irq_lock, flags);
|
||||
|
@ -1165,7 +1165,7 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
if (test_and_set_bit(0, (void*)&p->lock)) {
|
||||
printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -1097,7 +1097,7 @@ again:
|
||||
if (unlikely(dev->CFG_cache & CFG_LNKSTS)) {
|
||||
netif_stop_queue(ndev);
|
||||
if (unlikely(dev->CFG_cache & CFG_LNKSTS))
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
netif_start_queue(ndev);
|
||||
}
|
||||
|
||||
@ -1115,7 +1115,7 @@ again:
|
||||
netif_start_queue(ndev);
|
||||
goto again;
|
||||
}
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
if (free_idx == dev->tx_intr_idx) {
|
||||
|
@ -1130,7 +1130,7 @@ static int axnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
outb_p(ENISR_ALL, e8390_base + EN0_IMR);
|
||||
spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
||||
dev->stats.tx_errors++;
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -877,7 +877,7 @@ static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
if (length > ETH_FRAME_LEN) {
|
||||
printk(KERN_NOTICE "%s: Attempting to send a large packet"
|
||||
" (%d bytes).\n", dev->name, length);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
DEBUG(4, "%s: Transmitting a packet of length %lu.\n",
|
||||
|
@ -1388,7 +1388,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
dev->stats.tx_aborted_errors++;
|
||||
printk(KERN_DEBUG "%s: Internal error -- sent packet while busy.\n",
|
||||
dev->name);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
smc->saved_skb = skb;
|
||||
|
||||
|
@ -1399,7 +1399,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
DEBUG(2 + (okay ? 2 : 0), "%s: avail. tx space=%u%s\n",
|
||||
dev->name, freespace, okay ? " (okay)":" (not enough)");
|
||||
if (!okay) { /* not enough space */
|
||||
return 1; /* upper layer may decide to requeue this packet */
|
||||
return NETDEV_TX_BUSY; /* upper layer may decide to requeue this packet */
|
||||
}
|
||||
/* send the packet */
|
||||
PutWord(XIRCREG_EDP, (u_short)pktlen);
|
||||
|
@ -955,12 +955,12 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
|
||||
struct plip_local *snd = &nl->snd_data;
|
||||
|
||||
if (netif_queue_stopped(dev))
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
|
||||
/* We may need to grab the bus */
|
||||
if (!nl->port_owner) {
|
||||
if (parport_claim(nl->pardev))
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
nl->port_owner = 1;
|
||||
}
|
||||
|
||||
@ -969,7 +969,7 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
|
||||
if (skb->len > dev->mtu + dev->hard_header_len) {
|
||||
printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
|
||||
netif_start_queue (dev);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
if (net_debug > 2)
|
||||
|
@ -2084,7 +2084,7 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
netif_stop_queue(dev);
|
||||
spin_unlock_irqrestore(&sc->sbm_lock, flags);
|
||||
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
dev->trans_start = jiffies;
|
||||
|
@ -1108,7 +1108,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
if (!sh_eth_txfree(ndev)) {
|
||||
netif_stop_queue(ndev);
|
||||
spin_unlock_irqrestore(&mdp->lock, flags);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&mdp->lock, flags);
|
||||
|
@ -1584,7 +1584,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
|
||||
/* Don't transmit data before the complete of auto-negotiation */
|
||||
if(!sis_priv->autong_complete){
|
||||
netif_stop_queue(net_dev);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&sis_priv->lock, flags);
|
||||
|
@ -1082,7 +1082,7 @@ static int skfp_send_pkt(struct sk_buff *skb, struct net_device *dev)
|
||||
if (bp->QueueSkb == 0) { // return with tbusy set: queue full
|
||||
|
||||
netif_stop_queue(dev);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
bp->QueueSkb--;
|
||||
skb_queue_tail(&bp->SendSkbQueue, skb);
|
||||
|
@ -503,7 +503,7 @@ static int smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * de
|
||||
/* THIS SHOULD NEVER HAPPEN. */
|
||||
dev->stats.tx_aborted_errors++;
|
||||
printk(CARDNAME": Bad Craziness - sent packet while busy.\n" );
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
lp->saved_skb = skb;
|
||||
|
||||
|
@ -223,7 +223,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
|
||||
if (!laddr) {
|
||||
printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
|
||||
dev_kfree_skb(skb);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
|
||||
|
@ -1236,7 +1236,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
*/
|
||||
if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
|
||||
netif_stop_queue(dev);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
|
||||
|
@ -1023,7 +1023,7 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
|
||||
#if(NUM_XMIT_BUFFS > 1)
|
||||
if(test_and_set_bit(0,(void *) &p->lock)) {
|
||||
printk("%s: Queue was locked\n",dev->name);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
|
@ -526,7 +526,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
|
||||
if (netif_queue_stopped(dev)) {
|
||||
int tickssofar = jiffies - dev->trans_start;
|
||||
if (tickssofar < 20)
|
||||
return( 1 );
|
||||
return NETDEV_TX_BUSY;
|
||||
|
||||
DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
|
||||
dev->name, DREG ));
|
||||
@ -577,7 +577,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
|
||||
if (test_and_set_bit( 0, (void*)&lp->lock ) != 0) {
|
||||
printk( "%s: tx queue lock!.\n", dev->name);
|
||||
/* don't clear dev->tbusy flag. */
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
AREG = CSR0;
|
||||
|
@ -2275,7 +2275,7 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
spin_unlock_irq(&hp->happy_lock);
|
||||
printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
|
||||
dev->name);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
entry = hp->tx_new;
|
||||
|
@ -1111,7 +1111,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
|
||||
dev->name, priv->txHead, priv->txTail );
|
||||
netif_stop_queue(dev);
|
||||
priv->txBusyCount++;
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
tail_list->forward = 0;
|
||||
|
@ -1243,7 +1243,7 @@ static int xl_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
return 0;
|
||||
} else {
|
||||
spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ;
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -1187,7 +1187,7 @@ static int streamer_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
} else {
|
||||
netif_stop_queue(dev);
|
||||
spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1055,7 +1055,7 @@ static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
return 0;
|
||||
} else {
|
||||
spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -4601,7 +4601,7 @@ static int smctr_send_packet(struct sk_buff *skb, struct net_device *dev)
|
||||
netif_stop_queue(dev);
|
||||
|
||||
if(tp->QueueSkb == 0)
|
||||
return (1); /* Return with tbusy set: queue full */
|
||||
return NETDEV_TX_BUSY; /* Return with tbusy set: queue full */
|
||||
|
||||
tp->QueueSkb--;
|
||||
skb_queue_tail(&tp->SendSkbQueue, skb);
|
||||
|
@ -633,7 +633,7 @@ static int tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device
|
||||
if (tms380tr_debug > 0)
|
||||
printk(KERN_DEBUG "%s: No free TPL\n", dev->name);
|
||||
spin_unlock_irqrestore(&tp->lock, flags);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
dmabuf = 0;
|
||||
|
@ -612,7 +612,7 @@ static int de_start_xmit (struct sk_buff *skb, struct net_device *dev)
|
||||
if (tx_free == 0) {
|
||||
netif_stop_queue(dev);
|
||||
spin_unlock_irq(&de->lock);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
tx_free--;
|
||||
|
||||
|
@ -1461,12 +1461,12 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
struct de4x5_private *lp = netdev_priv(dev);
|
||||
u_long iobase = dev->base_addr;
|
||||
int status = 0;
|
||||
int status = NETDEV_TX_OK;
|
||||
u_long flags = 0;
|
||||
|
||||
netif_stop_queue(dev);
|
||||
if (!lp->tx_enable) { /* Cannot send for now */
|
||||
return -1;
|
||||
return NETDEV_TX_LOCKED;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1480,7 +1480,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
/* Test if cache is already locked - requeue skb if so */
|
||||
if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
|
||||
return -1;
|
||||
return NETDEV_TX_LOCKED;
|
||||
|
||||
/* Transmit descriptor ring full or stale skb */
|
||||
if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
|
||||
|
@ -686,7 +686,7 @@ static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
|
||||
spin_unlock_irqrestore(&db->lock, flags);
|
||||
printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
|
||||
db->tx_queue_cnt);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
/* Disable NIC interrupt */
|
||||
|
@ -591,7 +591,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
|
||||
spin_unlock_irqrestore(&db->lock, flags);
|
||||
printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
/* Disable NIC interrupt */
|
||||
|
@ -205,15 +205,15 @@ static int dlci_transmit(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
case DLCI_RET_OK:
|
||||
dev->stats.tx_packets++;
|
||||
ret = 0;
|
||||
ret = NETDEV_TX_OK;
|
||||
break;
|
||||
case DLCI_RET_ERR:
|
||||
dev->stats.tx_errors++;
|
||||
ret = 0;
|
||||
ret = NETDEV_TX_OK;
|
||||
break;
|
||||
case DLCI_RET_DROP:
|
||||
dev->stats.tx_dropped++;
|
||||
ret = 1;
|
||||
ret = NETDEV_TX_BUSY;
|
||||
break;
|
||||
}
|
||||
/* Alan Cox recommends always returning 0, and always freeing the packet */
|
||||
|
@ -469,7 +469,7 @@ sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
#else /* CONFIG_SBNI_MULTILINE */
|
||||
|
@ -283,7 +283,7 @@ static int wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
#endif
|
||||
netif_stop_queue(dev);
|
||||
spin_unlock_irq(&port->lock);
|
||||
return 1; /* request packet to be queued */
|
||||
return NETDEV_TX_BUSY; /* request packet to be queued */
|
||||
}
|
||||
|
||||
#ifdef DEBUG_PKT
|
||||
|
@ -1935,7 +1935,7 @@ static int mpi_start_xmit(struct sk_buff *skb, struct net_device *dev) {
|
||||
netif_stop_queue (dev);
|
||||
if (npacks > MAXTXQ) {
|
||||
dev->stats.tx_fifo_errors++;
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
skb_queue_tail (&ai->txq, skb);
|
||||
return 0;
|
||||
@ -2139,7 +2139,7 @@ static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
|
||||
|
||||
if (i == MAX_FIDS / 2) {
|
||||
dev->stats.tx_fifo_errors++;
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
}
|
||||
/* check min length*/
|
||||
@ -2211,7 +2211,7 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
|
||||
|
||||
if (i == MAX_FIDS) {
|
||||
dev->stats.tx_fifo_errors++;
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
}
|
||||
/* check min length*/
|
||||
|
@ -1199,7 +1199,7 @@ bad_end:
|
||||
arlan_process_interrupt(dev);
|
||||
netif_stop_queue (dev);
|
||||
ARLAN_DEBUG_EXIT("arlan_tx");
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
|
||||
|
@ -818,7 +818,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
spin_unlock_irqrestore(&priv->irqlock, flags);
|
||||
spin_unlock_bh(&priv->timerlock);
|
||||
netif_stop_queue(dev);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
frame_ctl = IEEE80211_FTYPE_DATA;
|
||||
|
@ -377,7 +377,7 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
struct hostap_interface *iface;
|
||||
local_info_t *local;
|
||||
int ret = 1;
|
||||
int ret = NETDEV_TX_BUSY;
|
||||
u16 fc;
|
||||
struct hostap_tx_data tx;
|
||||
ap_tx_ret tx_ret;
|
||||
|
@ -539,7 +539,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
spin_unlock_irqrestore(&ieee->lock, flags);
|
||||
netif_stop_queue(dev);
|
||||
dev->stats.tx_errors++;
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
EXPORT_SYMBOL(ieee80211_xmit);
|
||||
|
||||
|
@ -923,7 +923,7 @@ static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
if (!(pcmcia_dev_present(link))) {
|
||||
DEBUG(2, "ray_dev_start_xmit - device not present\n");
|
||||
return -1;
|
||||
return NETDEV_TX_LOCKED;
|
||||
}
|
||||
DEBUG(3, "ray_dev_start_xmit(skb=%p, dev=%p)\n", skb, dev);
|
||||
if (local->authentication_state == NEED_TO_AUTH) {
|
||||
@ -931,7 +931,7 @@ static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
if (!build_auth_frame(local, local->auth_id, OPEN_AUTH_REQUEST)) {
|
||||
local->authentication_state = AUTHENTICATED;
|
||||
netif_stop_queue(dev);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
}
|
||||
|
||||
@ -944,7 +944,7 @@ static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
case XMIT_NO_CCS:
|
||||
case XMIT_NEED_AUTH:
|
||||
netif_stop_queue(dev);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
case XMIT_NO_INTR:
|
||||
case XMIT_MSG_BAD:
|
||||
case XMIT_OK:
|
||||
|
@ -1540,7 +1540,7 @@ static int strip_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
if (!netif_running(dev)) {
|
||||
printk(KERN_ERR "%s: xmit call when iface is down\n",
|
||||
dev->name);
|
||||
return (1);
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
netif_stop_queue(dev);
|
||||
|
@ -2867,7 +2867,7 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev)
|
||||
spin_unlock_irqrestore(&lp->spinlock, flags);
|
||||
/* Check that we can continue */
|
||||
if (lp->tx_n_in_use == (NTXBLOCKS - 1))
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
/* Do we need some padding? */
|
||||
@ -2880,10 +2880,10 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev)
|
||||
skb_copy_from_linear_data(skb, data, skb->len);
|
||||
/* Write packet on the card */
|
||||
if(wv_packet_write(dev, data, ETH_ZLEN))
|
||||
return 1; /* We failed */
|
||||
return NETDEV_TX_BUSY; /* We failed */
|
||||
}
|
||||
else if(wv_packet_write(dev, skb->data, skb->len))
|
||||
return 1; /* We failed */
|
||||
return NETDEV_TX_BUSY; /* We failed */
|
||||
|
||||
|
||||
dev_kfree_skb(skb);
|
||||
|
@ -1315,9 +1315,9 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
dev->trans_start = jiffies;
|
||||
rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
|
||||
rc = netiucv_transmit_skb(privptr->conn, skb);
|
||||
netiucv_clear_busy(dev);
|
||||
return rc;
|
||||
return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -585,11 +585,11 @@ int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
|
||||
* available
|
||||
*/
|
||||
netif_stop_queue(netdev);
|
||||
status = 1;
|
||||
status = NETDEV_TX_BUSY;
|
||||
} else {
|
||||
DBG_WARNING(et131x_dbginfo,
|
||||
"Misc error; drop packet\n");
|
||||
status = 0;
|
||||
status = NETDEV_TX_OK;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -814,7 +814,7 @@ int ieee80211_xmit(struct sk_buff *skb,
|
||||
spin_unlock_irqrestore(&ieee->lock, flags);
|
||||
netif_stop_queue(dev);
|
||||
stats->tx_errors++;
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
|
||||
}
|
||||
|
||||
|
@ -432,21 +432,21 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
|
||||
/* success and more buf */
|
||||
/* avail, re: hw_txdata */
|
||||
netif_wake_queue(wlandev->netdev);
|
||||
result = 0;
|
||||
result = NETDEV_TX_OK;
|
||||
} else if (txresult == 1) {
|
||||
/* success, no more avail */
|
||||
pr_debug("txframe success, no more bufs\n");
|
||||
/* netdev->tbusy = 1; don't set here, irqhdlr */
|
||||
/* may have already cleared it */
|
||||
result = 0;
|
||||
result = NETDEV_TX_OK;
|
||||
} else if (txresult == 2) {
|
||||
/* alloc failure, drop frame */
|
||||
pr_debug("txframe returned alloc_fail\n");
|
||||
result = 1;
|
||||
result = NETDEV_TX_BUSY;
|
||||
} else {
|
||||
/* buffer full or queue busy, drop frame. */
|
||||
pr_debug("txframe returned full or busy\n");
|
||||
result = 1;
|
||||
result = NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
failed:
|
||||
|
@ -520,7 +520,7 @@ static int eth_start_xmit(struct sk_buff *skb, struct net_device *net)
|
||||
*/
|
||||
if (list_empty(&dev->tx_reqs)) {
|
||||
spin_unlock_irqrestore(&dev->req_lock, flags);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
req = container_of(dev->tx_reqs.next, struct usb_request, list);
|
||||
|
@ -1615,7 +1615,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
|
||||
{
|
||||
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
|
||||
struct ieee80211_local *local = sdata->local;
|
||||
int ret = 1, head_need;
|
||||
int ret = NETDEV_TX_BUSY, head_need;
|
||||
u16 ethertype, hdrlen, meshhdrlen = 0;
|
||||
__le16 fc;
|
||||
struct ieee80211_hdr hdr;
|
||||
|
@ -137,7 +137,7 @@ static int rose_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
if (!netif_running(dev)) {
|
||||
printk(KERN_ERR "ROSE: rose_xmit - called when iface is down\n");
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
dev_kfree_skb(skb);
|
||||
stats->tx_errors++;
|
||||
|
@ -338,7 +338,7 @@ restart:
|
||||
|
||||
if (busy) {
|
||||
netif_stop_queue(dev);
|
||||
return 1;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
dev->stats.tx_errors++;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user