mtd: nand: denali: skip driver internal bounce buffer when possible

For ecc->read_page() and ecc->write_page(), it is possible to call
dma_map_single() against the given buffer.  This bypasses the driver's
internal bounce buffer and saves the memcpy().
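
A minimal sketch of the pattern being adopted (the DMA API calls are the real
Linux ones; xfer_direct() and start_controller_dma() are illustrative names
only, not actual Denali helpers):

    #include <linux/dma-mapping.h>

    /*
     * Illustrative only: map the caller's buffer for this one transfer
     * instead of memcpy()ing it through a pre-mapped driver bounce buffer.
     * start_controller_dma() is a hypothetical stand-in for the
     * hardware-specific part of the transfer.
     */
    static int xfer_direct(struct device *dev, void *buf, size_t size, bool write)
    {
    	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
    	dma_addr_t dma_addr;
    	int ret;

    	dma_addr = dma_map_single(dev, buf, size, dir);
    	if (dma_mapping_error(dev, dma_addr))
    		return -EIO;	/* the driver instead falls back to PIO here */

    	ret = start_controller_dma(dev, dma_addr, size, write);

    	dma_unmap_single(dev, dma_addr, size, dir);

    	return ret;
    }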

Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Author: Masahiro Yamada, 2017-06-13 22:45:47 +09:00
Committed by: Boris Brezillon
parent 57a4d8b5f6
commit 997cde2a22


@@ -600,12 +600,16 @@ static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
 static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
 {
-	dma_addr_t dma_addr = denali->dma_addr;
+	dma_addr_t dma_addr;
 	uint32_t irq_mask, irq_status, ecc_err_mask;
 	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	int ret = 0;
 
-	dma_sync_single_for_device(denali->dev, dma_addr, size, dir);
+	dma_addr = dma_map_single(denali->dev, buf, size, dir);
+	if (dma_mapping_error(denali->dev, dma_addr)) {
+		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
+		return denali_pio_xfer(denali, buf, size, page, raw, write);
+	}
 
 	if (write) {
 		/*
@@ -636,7 +640,7 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
 		ret = -EBADMSG;
 
 	denali_enable_dma(denali, false);
-	dma_sync_single_for_cpu(denali->dev, dma_addr, size, dir);
+	dma_unmap_single(denali->dev, dma_addr, size, dir);
 
 	if (irq_status & INTR__ERASED_PAGE)
 		memset(buf, 0xff, size);
@@ -822,12 +826,10 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
 	int stat = 0;
 	int ret;
 
-	ret = denali_data_xfer(denali, denali->buf, mtd->writesize, page, 0, 0);
+	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
 	if (ret && ret != -EBADMSG)
 		return ret;
 
-	memcpy(buf, denali->buf, mtd->writesize);
-
 	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
 		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
 	else if (ret == -EBADMSG)
@@ -931,10 +933,8 @@ static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 {
 	struct denali_nand_info *denali = mtd_to_denali(mtd);
 
-	memcpy(denali->buf, buf, mtd->writesize);
-
-	return denali_data_xfer(denali, denali->buf, mtd->writesize, page,
-				0, 1);
+	return denali_data_xfer(denali, (void *)buf, mtd->writesize,
+				page, 0, 1);
 }
 
 static void denali_select_chip(struct mtd_info *mtd, int chip)
@@ -1374,14 +1374,8 @@ int denali_init(struct denali_nand_info *denali)
 	}
 
 	if (denali->dma_avail) {
-		denali->dma_addr = dma_map_single(denali->dev, denali->buf,
-						  mtd->writesize + mtd->oobsize,
-						  DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(denali->dev, denali->dma_addr)) {
-			dev_info(denali->dev,
-				 "Failed to map DMA buffer. Disabling DMA.\n");
-			denali->dma_avail = 0;
-		};
+		chip->options |= NAND_USE_BOUNCE_BUFFER;
+		chip->buf_align = 16;
 	}
 
 	/*
@@ -1471,16 +1465,8 @@ EXPORT_SYMBOL(denali_init);
 void denali_remove(struct denali_nand_info *denali)
 {
 	struct mtd_info *mtd = nand_to_mtd(&denali->nand);
-	/*
-	 * Pre-compute DMA buffer size to avoid any problems in case
-	 * nand_release() ever changes in a way that mtd->writesize and
-	 * mtd->oobsize are not reliable after this call.
-	 */
-	int bufsize = mtd->writesize + mtd->oobsize;
 
 	nand_release(mtd);
 	denali_disable_irq(denali);
-	dma_unmap_single(denali->dev, denali->dma_addr, bufsize,
-			 DMA_BIDIRECTIONAL);
 }
 EXPORT_SYMBOL(denali_remove);
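
For context, NAND_USE_BOUNCE_BUFFER and chip->buf_align, set in denali_init()
above, ask the NAND core to substitute its own DMA-able, aligned buffer when
the caller's buffer is unsuitable, so whatever reaches ecc->read_page() /
ecc->write_page() can be dma_map_single()'d directly. Roughly (a paraphrased
sketch of the nand_base.c logic of this era, not a verbatim quote):

    	/* Paraphrased sketch, not a verbatim quote from nand_base.c. */
    	if (chip->options & NAND_USE_BOUNCE_BUFFER)
    		use_bufpoi = !virt_addr_valid(buf) ||
    			     !IS_ALIGNED((unsigned long)buf, chip->buf_align);
    	else
    		use_bufpoi = 0;

    	if (use_bufpoi)
    		bufpoi = chip->buffers->databuf;	/* core-owned bounce buffer */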