
myri10ge: check for DMA mapping errors

On IOMMU systems, DMA mapping can fail, so we need to check
for that possibility.

Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 10545937e8
parent 3791b3f6fb
Author:    Stanislaw Gruszka <sgruszka@redhat.com>
Date:      2014-08-12 10:35:19 +02:00
Committer: David S. Miller <davem@davemloft.net>
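The fix follows the basic DMA API contract: a bus address returned by
pci_map_page()/pci_map_single() is only usable after
pci_dma_mapping_error() (dma_mapping_error() in the modern dma_* API)
has confirmed that the mapping succeeded, which matters behind an IOMMU
where mapping resources can be exhausted. Below is a minimal sketch of
that map-then-check pattern, written against the legacy pci_* DMA API
this driver used at the time; example_map_rx_page() is a hypothetical
helper for illustration, not part of the driver:

#include <linux/pci.h>
#include <linux/mm.h>

/* Hypothetical helper showing the map-then-check pattern: never hand
 * a DMA address to the device before verifying that the mapping
 * actually succeeded. */
static int example_map_rx_page(struct pci_dev *pdev, struct page *page,
			       dma_addr_t *bus)
{
	*bus = pci_map_page(pdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);

	/* The bus address is only valid if this check passes; on
	 * failure the caller must free the page and back off. */
	if (unlikely(pci_dma_mapping_error(pdev, *bus)))
		return -ENOMEM;

	return 0;
}

The TX path needs one extra step: if mapping fragment N fails,
fragments 0..N-1 of the skb are already mapped, so the error path has
to unwind those mappings before dropping the packet. That is what the
new myri10ge_unmap_tx_dma() helper in the diff below does; the existing
abort_linearize unwind loop is converted to reuse it.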

--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -873,6 +873,10 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
 		return -ENOMEM;
 	dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
 				   DMA_BIDIRECTIONAL);
+	if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) {
+		__free_page(dmatest_page);
+		return -ENOMEM;
+	}
 
 	/* Run a small DMA test.
 	 * The magic multipliers to the length tell the firmware
@@ -1294,6 +1298,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
 			 int bytes, int watchdog)
 {
 	struct page *page;
+	dma_addr_t bus;
 	int idx;
 #if MYRI10GE_ALLOC_SIZE > 4096
 	int end_offset;
@@ -1318,11 +1323,21 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
 				rx->watchdog_needed = 1;
 			return;
 		}
+
+		bus = pci_map_page(mgp->pdev, page, 0,
+				   MYRI10GE_ALLOC_SIZE,
+				   PCI_DMA_FROMDEVICE);
+		if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
+			__free_pages(page, MYRI10GE_ALLOC_ORDER);
+			if (rx->fill_cnt - rx->cnt < 16)
+				rx->watchdog_needed = 1;
+			return;
+		}
+
 		rx->page = page;
 		rx->page_offset = 0;
-		rx->bus = pci_map_page(mgp->pdev, page, 0,
-				       MYRI10GE_ALLOC_SIZE,
-				       PCI_DMA_FROMDEVICE);
+		rx->bus = bus;
 	}
 	rx->info[idx].page = rx->page;
 	rx->info[idx].page_offset = rx->page_offset;
@@ -2764,6 +2779,35 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
 	mb();
 }
 
+static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp,
+				  struct myri10ge_tx_buf *tx, int idx)
+{
+	unsigned int len;
+	int last_idx;
+
+	/* Free any DMA resources we've alloced and clear out the skb slot */
+	last_idx = (idx + 1) & tx->mask;
+	idx = tx->req & tx->mask;
+	do {
+		len = dma_unmap_len(&tx->info[idx], len);
+		if (len) {
+			if (tx->info[idx].skb != NULL)
+				pci_unmap_single(mgp->pdev,
+						 dma_unmap_addr(&tx->info[idx],
+								bus), len,
+						 PCI_DMA_TODEVICE);
+			else
+				pci_unmap_page(mgp->pdev,
+					       dma_unmap_addr(&tx->info[idx],
+							      bus), len,
+					       PCI_DMA_TODEVICE);
+			dma_unmap_len_set(&tx->info[idx], len, 0);
+			tx->info[idx].skb = NULL;
+		}
+		idx = (idx + 1) & tx->mask;
+	} while (idx != last_idx);
+}
+
 /*
  * Transmit a packet. We need to split the packet so that a single
  * segment does not cross myri10ge->tx_boundary, so this makes segment
@@ -2787,7 +2831,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
 	u32 low;
 	__be32 high_swapped;
 	unsigned int len;
-	int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
+	int idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
 	u16 pseudo_hdr_offset, cksum_offset, queue;
 	int cum_len, seglen, boundary, rdma_count;
 	u8 flags, odd_flag;
@@ -2884,9 +2928,12 @@ again:
 
 	/* map the skb for DMA */
 	len = skb_headlen(skb);
+	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (unlikely(pci_dma_mapping_error(mgp->pdev, bus)))
+		goto drop;
+
 	idx = tx->req & tx->mask;
 	tx->info[idx].skb = skb;
-	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
 	dma_unmap_addr_set(&tx->info[idx], bus, bus);
 	dma_unmap_len_set(&tx->info[idx], len, len);
@@ -2985,12 +3032,16 @@ again:
 			break;
 
 		/* map next fragment for DMA */
-		idx = (count + tx->req) & tx->mask;
 		frag = &skb_shinfo(skb)->frags[frag_idx];
 		frag_idx++;
 		len = skb_frag_size(frag);
 		bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
 				       DMA_TO_DEVICE);
+		if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
+			myri10ge_unmap_tx_dma(mgp, tx, idx);
+			goto drop;
+		}
+		idx = (count + tx->req) & tx->mask;
 		dma_unmap_addr_set(&tx->info[idx], bus, bus);
 		dma_unmap_len_set(&tx->info[idx], len, len);
 	}
@@ -3021,31 +3072,8 @@ again:
 	return NETDEV_TX_OK;
 
 abort_linearize:
-	/* Free any DMA resources we've alloced and clear out the skb
-	 * slot so as to not trip up assertions, and to avoid a
-	 * double-free if linearizing fails */
-	last_idx = (idx + 1) & tx->mask;
-	idx = tx->req & tx->mask;
-	tx->info[idx].skb = NULL;
-	do {
-		len = dma_unmap_len(&tx->info[idx], len);
-		if (len) {
-			if (tx->info[idx].skb != NULL)
-				pci_unmap_single(mgp->pdev,
-						 dma_unmap_addr(&tx->info[idx],
-								bus), len,
-						 PCI_DMA_TODEVICE);
-			else
-				pci_unmap_page(mgp->pdev,
-					       dma_unmap_addr(&tx->info[idx],
-							      bus), len,
-					       PCI_DMA_TODEVICE);
-			dma_unmap_len_set(&tx->info[idx], len, 0);
-			tx->info[idx].skb = NULL;
-		}
-		idx = (idx + 1) & tx->mask;
-	} while (idx != last_idx);
+	myri10ge_unmap_tx_dma(mgp, tx, idx);
 
 	if (skb_is_gso(skb)) {
 		netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
 		goto drop;