dmaengine fixes for 4.4

Late fixes for 4.4: three driver fixes, comprising a revert of the
mic-x100 fix that was causing a regression, an xgene fix for a double
IRQ, and an async_tx fix to use GFP_NOWAIT.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJWj7XNAAoJEHwUBw8lI4NHOEEQAIj9YoQjkHkPo2PviIeiX3Zy
HNAIM0GepYSZrJO+duKAzFxK4mEGC92gpyOiv6NJ91RdC2ijYc9gb6Z7ig/g4znq
odqMGFIdBEqZAq1PhN81BtJqdkw4EmzffS/GaEol0MM85Hvm5r91GfOx86AKs0HW
emOJL5XXG8/byNY7xihCl+55u9l1E5c7+K8QNj3++bgKK07KIor4qtxBNUqejQUj
/eFdi8Z1SFOHHDvdYKGVRRm70uq6BqfYcsLfLcgd7uEDkvz0Zcy7SF11hY67z8QU
ood0eyyuBhuZjxzZN+5No9UkRf2BKfjXUex/wr2AnuqF2xYcGYC76tFKeAUbOc6s
YF+SdfDoQVys3o6EhYj6hj9s1viepu41tpnOB8ldBE2B/Gyn0iq/ipj05rsMKIRp
ivefoSb7KG1O6aesJ/n7vUBY8VWiE3KBcRYoIotW5jNQ83Jb/e40ougpshzxeWYR
08PekcG+CAUlnfaF8f0+c3xuo2Q23MMpf6e3XqlNxCGBpwIn8NEtLw1qcLdKI9zG
x6zguGxEZDIoGYC0kjmP+PNAjrJmNVHutcJODgK73H4zNvY1O6Kg+dOBLrwkqXh5
Um6RRDzOcEWsGMHg0akPHWaaAt7slCvtTXLMtzMysHR1mkihqAbzq6Zr0QoGjekz
j20fC8o8bArX9cCDO71F
=jYJY
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-fix-4.4' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "Late fixes for 4.4 are three fixes for drivers, which include a
  revert of the mic-x100 fix that is causing a regression, an xgene
  fix for a double IRQ, and an async_tx fix to use GFP_NOWAIT"

* tag 'dmaengine-fix-4.4' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: xgene-dma: Fix double IRQ issue by setting IRQ_DISABLE_UNLAZY flag
  async_tx: use GFP_NOWAIT rather than GFP_IO
  dmaengine: Revert "dmaengine: mic_x100: add missing spin_unlock"
commit 212c7f66ec
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dmaengine_unmap_data *unmap = NULL;
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 
 	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		unsigned long dma_prep_flags = 0;
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -188,7 +188,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	/* XORing P/Q is only implemented in software */
 	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
@@ -307,7 +307,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks < 4);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	if (unmap && disks <= dma_maxpq(device, 0) &&
 	    is_dma_pq_aligned(device, offset, 0, len)) {
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 	u8 *a, *b, *c;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		struct device *dev = dma->dev;
@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 	u8 *d, *s;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		dma_addr_t dma_dest[2];
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
 
 	if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
 		struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
 
 	if (unmap && src_cnt <= device->max_xor &&
 	    is_dma_xor_aligned(device, offset, 0, len)) {
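Why GFP_NOWAIT: these async_tx prep paths can be entered from contexts
that must not sleep (md/raid5 calls them under a spinlock), and GFP_NOIO
may still block in reclaim. Below is a minimal sketch of the call-site
pattern, simplified from async_memcpy(); the exact cleanup in the real
code differs, so treat it as illustrative rather than verbatim:

	/*
	 * Illustrative sketch: the GFP_NOWAIT allocation fails fast
	 * instead of sleeping in reclaim, and every call site already
	 * tolerates failure by running the operation synchronously.
	 */
	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);

	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
		/* ... map pages, build and submit the DMA descriptor ... */
	} else {
		/* no descriptor or unaligned: plain CPU copy instead */
		void *dst = kmap_atomic(dest) + dest_offset;
		void *src_buf = kmap_atomic(src) + src_offset;

		memcpy(dst, src_buf, len);
		kunmap_atomic(src_buf - src_offset);
		kunmap_atomic(dst - dest_offset);
	}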
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -317,7 +317,6 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
 	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
 	struct device *dev = mic_dma_ch_to_device(mic_ch);
 	int result;
-	struct dma_async_tx_descriptor *tx = NULL;
 
 	if (!len && !flags)
 		return NULL;
@@ -325,13 +324,10 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
 	spin_lock(&mic_ch->prep_lock);
 	result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
 	if (result >= 0)
-		tx = allocate_tx(mic_ch);
-
-	if (!tx)
-		dev_err(dev, "Error enqueueing dma, error=%d\n", result);
-
+		return allocate_tx(mic_ch);
+	dev_err(dev, "Error enqueueing dma, error=%d\n", result);
 	spin_unlock(&mic_ch->prep_lock);
-	return tx;
+	return NULL;
 }
 
 static struct dma_async_tx_descriptor *
@@ -339,14 +335,13 @@ mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
 {
 	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
 	int ret;
-	struct dma_async_tx_descriptor *tx = NULL;
 
 	spin_lock(&mic_ch->prep_lock);
 	ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
 	if (!ret)
-		tx = allocate_tx(mic_ch);
+		return allocate_tx(mic_ch);
 	spin_unlock(&mic_ch->prep_lock);
-	return tx;
+	return NULL;
 }
 
 /* Return the status of the transaction */
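Why the revert: mic_x100's prep_lock is taken in the prep callbacks
and, on success, is deliberately held until the descriptor is actually
submitted, so the extra spin_unlock added by the reverted patch
produced an unlock imbalance. A hedged sketch of the submit-side
counterpart follows; it approximates the driver's tx_submit callback
in 4.4, so treat the exact body as illustrative:

	/*
	 * Approximation of the submit path that pairs with the prep
	 * functions above (not verbatim driver code): prep_lock is
	 * only dropped here, once the hardware ring head has been
	 * advanced, which is why the unlock in prep was never
	 * "missing" on the success path.
	 */
	static dma_cookie_t mic_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	{
		struct mic_dma_chan *mic_ch = to_mic_dma_chan(tx->chan);
		dma_cookie_t cookie;

		dma_cookie_assign(tx);
		cookie = tx->cookie;
		mic_dma_hw_ring_inc_head(mic_ch);	/* publish to hardware */
		spin_unlock(&mic_ch->prep_lock);	/* pairs with lock in prep */
		return cookie;
	}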
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -29,6 +29,7 @@
 #include <linux/dmapool.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 
@@ -1610,6 +1611,7 @@ static int xgene_dma_request_irqs(struct xgene_dma *pdma)
 	/* Register DMA channel rx irq */
 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
 		chan = &pdma->chan[i];
+		irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
 		ret = devm_request_irq(chan->dev, chan->rx_irq,
 				       xgene_dma_chan_ring_isr,
 				       0, chan->name, chan);
@@ -1620,6 +1622,7 @@ static int xgene_dma_request_irqs(struct xgene_dma *pdma)
 
 		for (j = 0; j < i; j++) {
 			chan = &pdma->chan[i];
+			irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
 			devm_free_irq(chan->dev, chan->rx_irq, chan);
 		}
 
@@ -1640,6 +1643,7 @@ static void xgene_dma_free_irqs(struct xgene_dma *pdma)
 
 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
 		chan = &pdma->chan[i];
+		irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
 		devm_free_irq(chan->dev, chan->rx_irq, chan);
 	}
 }
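Why IRQ_DISABLE_UNLAZY: by default the genirq core disables interrupts
lazily, leaving the line unmasked and only marking it disabled, so the
hardware can deliver one more interrupt that is replayed when the line
is re-enabled. For a handler that calls disable_irq_nosync() on its own
line, as this driver's ISR does, that shows up as a spurious second
interrupt. A minimal sketch of the pattern (hypothetical demo driver,
not code from this patch):

	#include <linux/interrupt.h>
	#include <linux/irq.h>

	static irqreturn_t demo_isr(int irq, void *data)
	{
		/* mask our own line until the completed work is drained */
		disable_irq_nosync(irq);
		/* ... schedule deferred processing, re-enable later ... */
		return IRQ_HANDLED;
	}

	static int demo_setup(struct device *dev, int irq, void *data)
	{
		/* force an immediate mask at the irqchip on disable_irq*() */
		irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
		return devm_request_irq(dev, irq, demo_isr, 0, "demo", data);
	}

On teardown the flag should be cleared before freeing the IRQ,
mirroring the irq_clear_status_flags() calls in the diff above.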