net: use DMA_x_DEVICE and dma_mapping_error with skb_frag_dma_map

When I converted some drivers from pci_map_page to skb_frag_dma_map, I
neglected to convert PCI_DMA_xDEVICE into DMA_x_DEVICE and
pci_dma_mapping_error into dma_mapping_error.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:  Ian Campbell, 2011-10-06 11:10:48 +01:00 (committed by David S. Miller)
Parent:  27737aa3a9
Commit:  5d6bcdfe38
29 changed files with 43 additions and 44 deletions
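
All of the hunks below make the same mechanical substitution. As a rough
sketch of the post-conversion idiom (assuming a struct pci_dev *pdev, the
usual <linux/skbuff.h> and <linux/dma-mapping.h> includes, and placeholder
names i and dma_error, none of which are taken from any one driver), a
converted per-fragment TX mapping looks like:

	/* Map a paged fragment with the generic DMA API: pass a DMA_*
	 * direction to skb_frag_dma_map() and check the result with
	 * dma_mapping_error() rather than the PCI_DMA_* / pci_* wrappers.
	 */
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
	dma_addr_t mapping;

	mapping = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
			DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, mapping))
		goto dma_error;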

@@ -803,8 +803,7 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 	maplen = frag->size;
 	mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag,
-			0, maplen,
-			PCI_DMA_TODEVICE);
+			0, maplen, DMA_TO_DEVICE);
 	elem = elem->next;
 	elem->skb = NULL;
 	elem->mapaddr = mapaddr;

@@ -445,7 +445,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
 		&skb_shinfo(skb)->frags[skb_fragment_index];
 	bus_address = skb_frag_dma_map(&nesdev->pcidev->dev,
 			frag, 0, frag->size,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	wqe_fragment_length[wqe_fragment_index] =
 		cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size);
 	set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
@@ -566,7 +566,7 @@ tso_sq_no_longer_full:
 	tso_bus_address[tso_frag_count] =
 		skb_frag_dma_map(&nesdev->pcidev->dev,
 				frag, 0, frag->size,
-				PCI_DMA_TODEVICE);
+				DMA_TO_DEVICE);
 	}
 	tso_frag_index = 0;

@@ -2487,7 +2487,7 @@ restart:
 	mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
 			frag->size,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	flagsize = (frag->size << 16);
 	if (skb->ip_summed == CHECKSUM_PARTIAL)

@@ -2183,7 +2183,7 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
 	buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
 			frag, 0,
 			buffer_info->length,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
 	ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
 			ATL1C_PCIMAP_TODEVICE);

@@ -1769,7 +1769,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
 			frag,
 			(i * MAX_TX_BUF_LEN),
 			tx_buffer->length,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
 	use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
 	use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |

@@ -2285,7 +2285,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 	buf_len -= buffer_info->length;
 	buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
 			frag, i * ATL1_MAX_TX_BUF_LEN,
-			buffer_info->length, PCI_DMA_TODEVICE);
+			buffer_info->length, DMA_TO_DEVICE);
 	if (++next_to_use == tpd_ring->count)
 		next_to_use = 0;

@@ -6539,7 +6539,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	len = frag->size;
 	mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	if (dma_mapping_error(&bp->pdev->dev, mapping))
 		goto dma_error;
 	dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,

@@ -6779,12 +6779,12 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	len = frag->size;
 	mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
-			len, PCI_DMA_TODEVICE);
+			len, DMA_TO_DEVICE);
 	tnapi->tx_buffers[entry].skb = NULL;
 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
 			mapping);
-	if (pci_dma_mapping_error(tp->pdev, mapping))
+	if (dma_mapping_error(&tp->pdev->dev, mapping))
 		goto dma_error;
 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,

@@ -1278,7 +1278,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
 	}
 	mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
-			frag->size, PCI_DMA_TODEVICE);
+			frag->size, DMA_TO_DEVICE);
 	desc_mapping = mapping;
 	desc_len = frag->size;

@@ -980,7 +980,7 @@ static inline unsigned int make_sgl(const struct sk_buff *skb,
 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 	mapping = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	sgp->len[j] = cpu_to_be32(frag->size);
 	sgp->addr[j] = cpu_to_be64(mapping);
 	j ^= 1;

@@ -607,7 +607,7 @@ static inline void enic_queue_wq_skb_cont(struct enic *enic,
 	enic_queue_wq_desc_cont(wq, skb,
 		skb_frag_dma_map(&enic->pdev->dev,
 				frag, 0, frag->size,
-				PCI_DMA_TODEVICE),
+				DMA_TO_DEVICE),
 		frag->size,
 		(len_left == 0), /* EOP? */
 		loopback);
@@ -726,7 +726,7 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 			(unsigned int)WQ_ENET_MAX_DESC_LEN);
 	dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
 			offset, len,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	enic_queue_wq_desc_cont(wq, skb,
 			dma_addr,
 			len,

@@ -2777,7 +2777,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 	map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
-			frag->size, PCI_DMA_TODEVICE);
+			frag->size, DMA_TO_DEVICE);
 	e = e->next;
 	e->skb = skb;

@@ -1229,9 +1229,9 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
 	re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
 			frag->size,
-			PCI_DMA_FROMDEVICE);
+			DMA_FROM_DEVICE);
-	if (pci_dma_mapping_error(pdev, re->frag_addr[i]))
+	if (dma_mapping_error(&pdev->dev, re->frag_addr[i]))
 		goto map_page_error;
 	}
 	return 0;
@@ -1936,9 +1936,9 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
 	const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 	mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
-			frag->size, PCI_DMA_TODEVICE);
+			frag->size, DMA_TO_DEVICE);
-	if (pci_dma_mapping_error(hw->pdev, mapping))
+	if (dma_mapping_error(&hw->pdev->dev, mapping))
 		goto mapping_unwind;
 	upper = upper_32_bits(mapping);

@@ -2928,7 +2928,7 @@ again:
 	frag_idx++;
 	len = frag->size;
 	bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	dma_unmap_addr_set(&tx->info[idx], bus, bus);
 	dma_unmap_len_set(&tx->info[idx], len, len);
 	}

@@ -1161,7 +1161,7 @@ again:
 		break;
 	buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0,
-			frag->size, PCI_DMA_TODEVICE);
+			frag->size, DMA_TO_DEVICE);
 	dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n",
 		(long long)buf, (long) page_to_pfn(frag->page),
 		frag->page_offset);

@@ -4193,7 +4193,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
 			frag, 0,
 			frag->size,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
 	if (offload_type == SKB_GSO_UDP)
 		txdp->Control_1 |= TXD_UFO_EN;

@@ -2150,7 +2150,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			&np->pci_dev->dev,
 			frag, offset,
 			bcnt,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	np->put_tx_ctx->dma_len = bcnt;
 	np->put_tx_ctx->dma_single = 0;
 	put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2264,7 +2264,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			&np->pci_dev->dev,
 			frag, offset,
 			bcnt,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	np->put_tx_ctx->dma_len = bcnt;
 	np->put_tx_ctx->dma_single = 0;
 	put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));

@@ -1506,9 +1506,9 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 	map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0,
-			frag->size, PCI_DMA_TODEVICE);
+			frag->size, DMA_TO_DEVICE);
 	map_size[i+1] = frag->size;
-	if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
+	if (dma_mapping_error(&mac->dma_pdev->dev, map[i + 1])) {
 		nfrags = i;
 		goto out_err_nolock;
 	}

@@ -1906,8 +1906,8 @@ netxen_map_tx_skb(struct pci_dev *pdev,
 	nf = &pbuf->frag_array[i+1];
 	map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
-			PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(pdev, map))
+			DMA_TO_DEVICE);
+	if (dma_mapping_error(&pdev->dev, map))
 		goto unwind;
 	nf->dma = map;

@@ -2389,9 +2389,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
 	}
 	map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
-	err = pci_dma_mapping_error(qdev->pdev, map);
+	err = dma_mapping_error(&qdev->pdev->dev, map);
 	if (err) {
 		netdev_err(qdev->ndev,
 			"PCI mapping frags failed with error: %d\n",

@@ -2136,8 +2136,8 @@ qlcnic_map_tx_skb(struct pci_dev *pdev,
 	nf = &pbuf->frag_array[i+1];
 	map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
-			PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(pdev, map))
+			DMA_TO_DEVICE);
+	if (dma_mapping_error(&pdev->dev, map))
 		goto unwind;
 	nf->dma = map;

@@ -1432,9 +1432,9 @@ static int ql_map_send(struct ql_adapter *qdev,
 	}
 	map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
-	err = pci_dma_mapping_error(qdev->pdev, map);
+	err = dma_mapping_error(&qdev->pdev->dev, map);
 	if (err) {
 		netif_err(qdev, tx_queued, qdev->ndev,
 			"PCI mapping frags failed with error: %d.\n",

@@ -243,7 +243,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	/* Map for DMA */
 	unmap_single = false;
 	dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	}
 	/* Transfer ownership of the skb to the final buffer */
@@ -926,8 +926,8 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
 			skb_frag_t *frag)
 {
 	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
-			frag->size, PCI_DMA_TODEVICE);
-	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+			frag->size, DMA_TO_DEVICE);
+	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
 		st->unmap_single = false;
 		st->unmap_len = frag->size;
 		st->in_len = frag->size;

@@ -2830,7 +2830,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 	len = fragp->size;
 	mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	tabort = cas_calc_tabort(cp, fragp->page_offset, len);
 	if (unlikely(tabort)) {

@@ -1072,7 +1072,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
 	len = this_frag->size;
 	mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag,
-			0, len, PCI_DMA_TODEVICE);
+			0, len, DMA_TO_DEVICE);
 	this_ctrl = ctrl;
 	if (frag == skb_shinfo(skb)->nr_frags - 1)
 		this_ctrl |= TXDCTRL_EOF;

@@ -1499,7 +1499,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
 	db->wptr->len = frag->size;
 	db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag,
 			0, frag->size,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	pbl++;
 	pbl->len = CPU_CHIP_SWAP32(db->wptr->len);

@@ -2559,7 +2559,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
 			frag, 0,
 			frag->size,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
 	td_ptr->td_buf[i + 1].pa_high = 0;

@@ -750,7 +750,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 	tbi->map_type = VMXNET3_MAP_PAGE;
 	tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
 			0, frag->size,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	tbi->len = frag->size;

@@ -524,7 +524,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 			&frags[i - 1],
 			0,
 			frags[i - 1].size,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 		}
 	}