netdev: ethernet dev_alloc_skb to netdev_alloc_skb
Replaced the deprecated dev_alloc_skb() with netdev_alloc_skb() in
drivers/net/ethernet, and removed the now-redundant skb->dev = dev
assignments after netdev_alloc_skb().

Signed-off-by: Pradeep A Dalvi <netdev@pradeepdalvi.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

parent 21a4e46995
commit c056b734e5
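netdev_alloc_skb() takes the owning net_device and associates it with the
freshly allocated skb, which is why the hunks below can also drop the manual
skb->dev = dev assignments. A minimal sketch of the conversion pattern this
commit applies throughout (example_rx_alloc and its parameters are
illustrative, not taken from any driver touched here):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_rx_alloc(struct net_device *dev,
					unsigned int pkt_len)
{
	struct sk_buff *skb;

	/*
	 * Old pattern:
	 *	skb = dev_alloc_skb(pkt_len + 2);
	 *	if (skb)
	 *		skb->dev = dev;
	 *
	 * New pattern: netdev_alloc_skb() sets skb->dev itself.
	 */
	skb = netdev_alloc_skb(dev, pkt_len + 2);
	if (!skb)
		return NULL;	/* caller drops the packet */

	skb_reserve(skb, 2);	/* keep the IP header aligned */
	return skb;
}

The hunks below apply this transformation across the touched drivers: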
@@ -1164,7 +1164,7 @@ static void eth16i_rx(struct net_device *dev)
 		else {	/* Ok so now we should have a good packet */
 			struct sk_buff *skb;
 
-			skb = dev_alloc_skb(pkt_len + 3);
+			skb = netdev_alloc_skb(dev, pkt_len + 3);
 			if( skb == NULL ) {
 				printk(KERN_WARNING "%s: Could'n allocate memory for packet (len %d)\n",
 				       dev->name, pkt_len);
@@ -1002,7 +1002,7 @@ static void fjn_rx(struct net_device *dev)
 			dev->stats.rx_errors++;
 			break;
 		}
-		skb = dev_alloc_skb(pkt_len+2);
+		skb = netdev_alloc_skb(dev, pkt_len + 2);
 		if (skb == NULL) {
 			netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
 				      pkt_len);
@@ -1274,7 +1274,7 @@ static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
 	/* Note: This depends on the alloc_skb functions allocating more
 	 * space than requested, i.e. aligning to 16bytes */
 
-	ringptr->skb = dev_alloc_skb(roundup(MAX_ETHER_SIZE + 2, 4));
+	ringptr->skb = netdev_alloc_skb(dev, roundup(MAX_ETHER_SIZE + 2, 4));
 
 	if (NULL != ringptr->skb) {
 		/*
@@ -1284,7 +1284,6 @@ static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
 		 */
 		skb_reserve(ringptr->skb, 2);
 
-		ringptr->skb->dev = dev;
 		ringptr->skb->data = (u_char *) skb_put(ringptr->skb, MAX_ETHER_SIZE);
 
 		/* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */
@@ -1817,7 +1816,7 @@ static void hp100_rx(struct net_device *dev)
 #endif
 
 		/* Now we allocate the skb and transfer the data into it. */
-		skb = dev_alloc_skb(pkt_len+2);
+		skb = netdev_alloc_skb(dev, pkt_len + 2);
 		if (skb == NULL) {	/* Not enough memory->drop packet */
 #ifdef HP100_DEBUG
 			printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n",
@@ -583,7 +583,7 @@ static void receive_packet(struct net_device *dev, int len)
 	unsigned long flags;
 
 	rlen = (len + 1) & ~1;
-	skb = dev_alloc_skb(rlen + 2);
+	skb = netdev_alloc_skb(dev, rlen + 2);
 
 	if (!skb) {
 		pr_warning("%s: memory squeeze, dropping packet\n", dev->name);
@@ -851,7 +851,7 @@ static void el16_rx(struct net_device *dev)
 		struct sk_buff *skb;
 
 		pkt_len &= 0x3fff;
-		skb = dev_alloc_skb(pkt_len+2);
+		skb = netdev_alloc_skb(dev, pkt_len + 2);
 		if (skb == NULL) {
 			pr_err("%s: Memory squeeze, dropping packet.\n",
 			       dev->name);
@@ -983,7 +983,7 @@ static void elmc_rcv_int(struct net_device *dev)
 		if ((totlen = rbd->status) & RBD_LAST) {	/* the first and the last buffer? */
 			totlen &= RBD_MASK;	/* length of this frame */
 			rbd->status = 0;
-			skb = (struct sk_buff *) dev_alloc_skb(totlen + 2);
+			skb = netdev_alloc_skb(dev, totlen + 2);
 			if (skb != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte alignment */
 				skb_put(skb,totlen);
@@ -1169,7 +1169,7 @@ static void mc32_rx_ring(struct net_device *dev)
 		/* Try to save time by avoiding a copy on big frames */
 
 		if ((length > RX_COPYBREAK) &&
-		    ((newskb=dev_alloc_skb(1532)) != NULL))
+		    ((newskb = netdev_alloc_skb(dev, 1532)) != NULL))
 		{
 			skb=lp->rx_ring[rx_ring_tail].skb;
 			skb_put(skb, length);
@@ -1180,7 +1180,7 @@ static void mc32_rx_ring(struct net_device *dev)
 		}
 		else
 		{
-			skb=dev_alloc_skb(length+2);
+			skb = netdev_alloc_skb(dev, length + 2);
 
 			if(skb==NULL) {
 				dev->stats.rx_dropped++;
@@ -549,14 +549,13 @@ static inline int init_rx_bufs(struct net_device *dev)
 	/* First build the Receive Buffer Descriptor List */
 
 	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
-		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
 
 		if (skb == NULL) {
 			remove_rx_bufs(dev);
 			return -ENOMEM;
 		}
 
-		skb->dev = dev;
 		rbd->v_next = rbd+1;
 		rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
 		rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
@@ -810,7 +809,7 @@ static inline int i596_rx(struct net_device *dev)
 			struct sk_buff *newskb;
 
 			/* Get fresh skbuff to replace filled one. */
-			newskb = dev_alloc_skb(PKT_BUF_SZ);
+			newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
 			if (newskb == NULL) {
 				skb = NULL;	/* drop pkt */
 				goto memory_squeeze;
@@ -819,7 +818,6 @@ static inline int i596_rx(struct net_device *dev)
 			skb_put(skb, pkt_len);
 			rx_in_place = 1;
 			rbd->skb = newskb;
-			newskb->dev = dev;
 			rbd->v_data = newskb->data;
 			rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
 #ifdef __mc68000__
@@ -827,7 +825,7 @@ static inline int i596_rx(struct net_device *dev)
 #endif
 		}
 		else
-			skb = dev_alloc_skb(pkt_len + 2);
+			skb = netdev_alloc_skb(dev, pkt_len + 2);
 memory_squeeze:
 		if (skb == NULL) {
 			/* XXX tulip.c can defer packets here!! */
@@ -1563,7 +1563,7 @@ eepro_rx(struct net_device *dev)
 
 			dev->stats.rx_bytes+=rcv_size;
 			rcv_size &= 0x3fff;
-			skb = dev_alloc_skb(rcv_size+5);
+			skb = netdev_alloc_skb(dev, rcv_size + 5);
 			if (skb == NULL) {
 				printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
 				dev->stats.rx_dropped++;
@@ -955,7 +955,7 @@ static void eexp_hw_rx_pio(struct net_device *dev)
 		{
 			struct sk_buff *skb;
 			pkt_len &= 0x3fff;
-			skb = dev_alloc_skb(pkt_len+16);
+			skb = netdev_alloc_skb(dev, pkt_len + 16);
 			if (skb == NULL)
 			{
 				printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name);
@@ -867,7 +867,7 @@ ether1_recv_done (struct net_device *dev)
 		struct sk_buff *skb;
 
 		length = (length + 1) & ~1;
-		skb = dev_alloc_skb (length + 2);
+		skb = netdev_alloc_skb(dev, length + 2);
 
 		if (skb) {
 			skb_reserve (skb, 2);
@@ -656,7 +656,7 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp,
 	if (rfd->stat & RFD_STAT_OK) {
 		/* a good frame */
 		int pkt_len = (rfd->count & 0x3fff);
-		struct sk_buff *skb = dev_alloc_skb(pkt_len);
+		struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);
 
 		(*frames)++;
 
@@ -964,7 +964,7 @@ static void ni52_rcv_int(struct net_device *dev)
 				/* the first and the last buffer? */
 				totlen &= RBD_MASK;	/* length of this frame */
 				writew(0x00, &rbd->status);
-				skb = (struct sk_buff *)dev_alloc_skb(totlen+2);
+				skb = netdev_alloc_skb(dev, totlen + 2);
 				if (skb != NULL) {
 					skb_reserve(skb, 2);
 					skb_put(skb, totlen);
@@ -778,7 +778,7 @@ static void sun3_82586_rcv_int(struct net_device *dev)
 				{
 					totlen &= RBD_MASK;	/* length of this frame */
 					rbd->status = 0;
-					skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
+					skb = netdev_alloc_skb(dev, totlen + 2);
 					if(skb != NULL)
 					{
 						skb_reserve(skb,2);
@@ -762,7 +762,7 @@ static void znet_rx(struct net_device *dev)
 		/* Malloc up new buffer. */
 		struct sk_buff *skb;
 
-		skb = dev_alloc_skb(pkt_len);
+		skb = netdev_alloc_skb(dev, pkt_len);
 		if (skb == NULL) {
 			if (znet_debug)
 				printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
@@ -744,9 +744,6 @@ static int ipg_get_rxbuff(struct net_device *dev, int entry)
 		return -ENOMEM;
 	}
 
-	/* Associate the receive buffer with the IPG NIC. */
-	skb->dev = dev;
-
 	/* Save the address of the sk_buff structure. */
 	sp->rx_buff[entry] = skb;
 
@@ -114,7 +114,7 @@ struct ltq_etop_priv {
 static int
 ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
 {
-	ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
+	ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
 	if (!ch->skb[ch->dma.desc])
 		return -ENOMEM;
 	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
@@ -667,7 +667,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
 
 		skb = __skb_dequeue(&mp->rx_recycle);
 		if (skb == NULL)
-			skb = dev_alloc_skb(mp->skb_size);
+			skb = netdev_alloc_skb(mp->dev, mp->skb_size);
 
 		if (skb == NULL) {
 			mp->oom = 1;
@@ -350,7 +350,7 @@ static void rxq_refill(struct net_device *dev)
 	while (pep->rx_desc_count < pep->rx_ring_size) {
 		int size;
 
-		skb = dev_alloc_skb(pep->skb_size);
+		skb = netdev_alloc_skb(dev, pep->skb_size);
 		if (!skb)
 			break;
 		if (SKB_DMA_REALIGN)
@@ -463,12 +463,11 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 	int used_frags;
 	dma_addr_t dma;
 
-	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
+	skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
 	if (!skb) {
 		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
 		return NULL;
 	}
-	skb->dev = priv->dev;
 	skb_reserve(skb, NET_IP_ALIGN);
 	skb->len = length;
 
@@ -278,7 +278,8 @@ ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
 
 	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
 		if (!ksp->rx_buffers[buff_n].skb) {
-			struct sk_buff *skb = dev_alloc_skb(MAX_RXBUF_SIZE);
+			struct sk_buff *skb =
+				netdev_alloc_skb(ksp->ndev, MAX_RXBUF_SIZE);
 			dma_addr_t mapping;
 
 			ksp->rx_buffers[buff_n].skb = skb;
@@ -299,7 +300,6 @@ ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
 				break;
 			}
 			ksp->rx_buffers[buff_n].dma_ptr = mapping;
-			skb->dev = ksp->ndev;
 			ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;
 
 			/* Record this into the DMA ring */
@@ -796,7 +796,7 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
 
 	frame_hdr = ks->frame_head_info;
 	while (ks->frame_cnt--) {
-		skb = dev_alloc_skb(frame_hdr->len + 16);
+		skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
 		if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
 			(frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
 			skb_reserve(skb, 2);
@@ -4863,7 +4863,7 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
 			memset(&skb->data[skb->len], 0, 50 - skb->len);
 			skb->len = 50;
 		} else {
-			skb = dev_alloc_skb(50);
+			skb = netdev_alloc_skb(dev, 50);
 			if (!skb)
 				return NETDEV_TX_BUSY;
 			memcpy(skb->data, org_skb->data, org_skb->len);
@@ -4885,7 +4885,7 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
 	    (ETH_P_IPV6 == htons(skb->protocol)))) {
 		struct sk_buff *org_skb = skb;
 
-		skb = dev_alloc_skb(org_skb->len);
+		skb = netdev_alloc_skb(dev, org_skb->len);
 		if (!skb) {
 			rc = NETDEV_TX_BUSY;
 			goto unlock;
@@ -5019,7 +5019,7 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
 
 	do {
 		/* skb->data != skb->head */
-		skb = dev_alloc_skb(packet_len + 2);
+		skb = netdev_alloc_skb(dev, packet_len + 2);
 		if (!skb) {
 			dev->stats.rx_dropped++;
 			return -ENOMEM;
@@ -954,14 +954,13 @@ static void enc28j60_hw_rx(struct net_device *ndev)
 		if (len > MAX_FRAMELEN)
 			ndev->stats.rx_over_errors++;
 	} else {
-		skb = dev_alloc_skb(len + NET_IP_ALIGN);
+		skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);
 		if (!skb) {
 			if (netif_msg_rx_err(priv))
 				dev_err(&ndev->dev,
 					"out of memory for Rx'd frame\n");
 			ndev->stats.rx_dropped++;
 		} else {
-			skb->dev = ndev;
 			skb_reserve(skb, NET_IP_ALIGN);
 			/* copy the packet from the receive buffer */
 			enc28j60_mem_read(priv,
@@ -152,7 +152,7 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len)
 	if (!len)
 		return len;
 
-	skb = dev_alloc_skb(len + NET_IP_ALIGN);
+	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
 	if (!skb) {
 		dev->stats.rx_dropped++;
 		return -ENOMEM;
@@ -589,7 +589,7 @@ static void irqrx_handler(struct net_device *dev)
 
 		/* fetch buffer */
 
-		skb = dev_alloc_skb(rda.length + 2);
+		skb = netdev_alloc_skb(dev, rda.length + 2);
 		if (skb == NULL)
 			dev->stats.rx_dropped++;
 		else {
@@ -1934,11 +1934,10 @@ static void refill_rx(struct net_device *dev)
 		int entry = np->dirty_rx % RX_RING_SIZE;
 		if (np->rx_skbuff[entry] == NULL) {
 			unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
-			skb = dev_alloc_skb(buflen);
+			skb = netdev_alloc_skb(dev, buflen);
 			np->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break; /* Better luck next round. */
-			skb->dev = dev; /* Mark as being used by this device. */
 			np->rx_dma[entry] = pci_map_single(np->pci_dev,
 				skb->data, buflen, PCI_DMA_FROMDEVICE);
 			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
@@ -2344,7 +2343,7 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
 			/* Check if the packet is long enough to accept
 			 * without copying to a minimally-sized skbuff. */
 			if (pkt_len < rx_copybreak &&
-			    (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
+			    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
 				/* 16 byte align the IP header */
 				skb_reserve(skb, RX_OFFSET);
 				pci_dma_sync_single_for_cpu(np->pci_dev,
@@ -51,7 +51,7 @@ static int sonic_open(struct net_device *dev)
 		printk("sonic_open: initializing sonic driver.\n");
 
 	for (i = 0; i < SONIC_NUM_RRS; i++) {
-		struct sk_buff *skb = dev_alloc_skb(SONIC_RBSIZE + 2);
+		struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
 		if (skb == NULL) {
 			while(i > 0) { /* free any that were allocated successfully */
 				i--;
@@ -422,7 +422,7 @@ static void sonic_rx(struct net_device *dev)
 		status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
 		if (status & SONIC_RCR_PRX) {
 			/* Malloc up new buffer. */
-			new_skb = dev_alloc_skb(SONIC_RBSIZE + 2);
+			new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
 			if (new_skb == NULL) {
 				printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
 				lp->stats.rx_dropped++;
@@ -2524,7 +2524,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
 			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
 
 		/* allocate skb */
-		skb = dev_alloc_skb(size);
+		skb = netdev_alloc_skb(nic->dev, size);
 		if (!skb) {
 			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
 				  ring->dev->name);
@@ -6820,7 +6820,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
 			 */
 			rxdp1->Buffer0_ptr = *temp0;
 		} else {
-			*skb = dev_alloc_skb(size);
+			*skb = netdev_alloc_skb(dev, size);
 			if (!(*skb)) {
 				DBG_PRINT(INFO_DBG,
 					  "%s: Out of memory to allocate %s\n",
@@ -6849,7 +6849,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
 			rxdp3->Buffer0_ptr = *temp0;
 			rxdp3->Buffer1_ptr = *temp1;
 		} else {
-			*skb = dev_alloc_skb(size);
+			*skb = netdev_alloc_skb(dev, size);
 			if (!(*skb)) {
 				DBG_PRINT(INFO_DBG,
 					  "%s: Out of memory to allocate %s\n",
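Most hunks above keep the driver's existing "len + 2" over-allocation and the
skb_reserve(skb, 2) that follows it. Those 2 bytes (NET_IP_ALIGN) shift the
14-byte Ethernet header so that the IP header behind it lands on a 16-byte
boundary. A minimal sketch of that idiom, under the same caveat as above
(aligned_rx_skb is an illustrative helper, not part of this patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/*
 * Reserving NET_IP_ALIGN (2 on most architectures) bytes means the
 * 14-byte Ethernet header ends at offset 16, so the IP header that
 * follows it starts on an aligned boundary.
 */
static struct sk_buff *aligned_rx_skb(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

Kernels since 2.6.33 also provide netdev_alloc_skb_ip_align(), which folds the
reserve into the allocation.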