/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>

#if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
#define ARCH_HAS_DMA
#endif

#include <linux/platform_data/mtd-nand-pxa3xx.h>

#define CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
#define NAND_STOP_DELAY		msecs_to_jiffies(40)
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NFCV2_NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7
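
/*
 * Illustrative example of how a command word is assembled from the
 * NDCB0_* fields above (see prepare_set_command() further below):
 * a READID is encoded as
 *   NDCB0_CMD_TYPE(3) | NDCB0_ADDR_CYC(1) | NAND_CMD_READID
 * i.e. command type "read ID", one address cycle, and the 0x90 opcode
 * in the CMD1 byte.
 */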

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel_relaxed((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl_relaxed((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info		*mtd;
	void			*info_data;

	/* page size of attached chip */
	int			use_ecc;
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};

struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	*pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	struct completion	cmd_complete, dev_ready;

	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	/* DMA information */
	struct scatterlist	sg;
	enum dma_data_direction	dma_dir;
	struct dma_chan		*dma_chan;
	dma_cookie_t		dma_cookie;
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;
	int			data_dma_ch;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};

static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");

struct pxa3xx_nand_timing {
	unsigned int	tCH;  /* Enable signal hold time */
	unsigned int	tCS;  /* Enable signal setup time */
	unsigned int	tWH;  /* ND_nWE high duration */
	unsigned int	tWP;  /* ND_nWE pulse time */
	unsigned int	tRH;  /* ND_nRE high duration */
	unsigned int	tRP;  /* ND_nRE pulse width */
	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
};

struct pxa3xx_nand_flash {
	uint32_t	chip_id;
	unsigned int	flash_width;	/* Width of Flash memory (DWIDTH_M) */
	unsigned int	dfc_width;	/* Width of flash controller (DWIDTH_C) */
	struct pxa3xx_nand_timing *timing;	/* NAND Flash timing */
};

static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};
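
/*
 * All values in the timing[] table above are in nanoseconds;
 * pxa3xx_nand_set_timing() converts them to controller clock cycles
 * with ns2cycle(), and the NDTR0_xxx()/NDTR1_xxx() macros clamp each
 * result to the width of its register field.
 */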

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
};
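
/*
 * The chip_id above is the two-byte READID value as assembled in
 * pxa3xx_nand_init_timings_compat(): the first ID byte (manufacturer)
 * ends up in the low byte and the second (device) in the high byte,
 * so e.g. 0xdaec is Samsung (0xec) device 0xda.
 */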

static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};

static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
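
/*
 * Worked example (illustrative, assuming a 156 MHz controller clock):
 *   ns2cycle(40, 156000000) = 40 * (156000000 / 1000000) / 1000
 *                           = 40 * 156 / 1000 = 6 cycles,
 * so a 40 ns tCH would be programmed as 6 clock cycles (NDTR0_tCH()
 * would clamp anything above 7 to fit its 3-bit field).
 */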

static const struct of_device_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);

static enum pxa3xx_nand_variant
pxa3xx_nand_get_variant(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
	if (!of_id)
		return PXA3XX_NAND_VARIANT_PXA;
	return (enum pxa3xx_nand_variant)of_id->data;
}
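
/*
 * Editor's note (illustrative fragment, not taken from a real board
 * file): a device tree node matching the second entry above might
 * look like
 *
 *	nand@d0000 {
 *		compatible = "marvell,armada370-nand";
 *		...
 *	};
 *
 * in which case pxa3xx_nand_get_variant() returns
 * PXA3XX_NAND_VARIANT_ARMADA370; without an OF match it falls back
 * to PXA3XX_NAND_VARIANT_PXA.
 */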

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
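
/*
 * Note: the nand_sdr_timings fields used above (tCH_min, tCS_min, ...)
 * are expressed in picoseconds in this kernel version, which is why
 * each one goes through DIV_ROUND_UP(x, 1000) to obtain nanoseconds
 * before being handed to ns2cycle(). tR comes from chip->chip_delay
 * (microseconds), hence the "* 1000" to reach nanoseconds.
 */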

static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
					   unsigned int *flash_width,
					   unsigned int *dfc_width)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	int i, id, ntypes;

	ntypes = ARRAY_SIZE(builtin_flash_types);

	chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);

	id = chip->read_byte(host->mtd);
	id |= chip->read_byte(host->mtd) << 0x8;

	for (i = 0; i < ntypes; i++) {
		f = &builtin_flash_types[i];

		if (f->chip_id == id)
			break;
	}

	if (i == ntypes) {
		dev_err(&info->pdev->dev, "Error: timings not found\n");
		return -EINVAL;
	}

	pxa3xx_nand_set_timing(host, f->timing);

	*flash_width = f->flash_width;
	*dfc_width = f->dfc_width;

	return 0;
}

static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *timings;

	mode = fls(mode) - 1;
	if (mode < 0)
		mode = 0;

	timings = onfi_async_timing_mode_to_sdr_timings(mode);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	return 0;
}

static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned int flash_width = 0, dfc_width = 0;
	int mode, err;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		err = pxa3xx_nand_init_timings_compat(host, &flash_width,
						      &dfc_width);
		if (err)
			return err;

		if (flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		err = pxa3xx_nand_init_timings_onfi(host, mode);
		if (err)
			return err;
	}

	return 0;
}
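
/*
 * onfi_get_async_timing_mode() returns a bitmask of the asynchronous
 * timing modes the chip claims to support, so fls(mode) - 1 in
 * pxa3xx_nand_init_timings_onfi() picks the fastest one: e.g. a mask
 * of 0x1f (modes 0-4) gives fls(0x1f) - 1 = 4, i.e. timing mode 4.
 */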

/*
 * Set the data and OOB size, depending on the selected
 * spare and ECC configuration.
 * Only applicable to READ0, READOOB and PAGEPROG commands.
 */
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
				struct mtd_info *mtd)
{
	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;

	info->data_size = mtd->writesize;
	if (!oob_enable)
		return;

	info->oob_size = info->spare_size;
	if (!info->use_ecc)
		info->oob_size += info->ecc_size;
}

/**
 * NOTE: ND_RUN must be set first, and only then may the command
 * buffer be written; the controller does not work otherwise.
 * We enable all interrupts at the same time and let
 * pxa3xx_nand_irq handle the rest of the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}

static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait for the RUN bit in NDCR to become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	if (info->dma_chan)
		dmaengine_terminate_all(info->dma_chan);

	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}

static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte batch of reads
		 * we have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO eight 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			ioread32_rep(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			data += 32;
			len -= 8;
		}
	}

	ioread32_rep(info->mmio_base + NDDB, data, len);
}
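
/*
 * Example of the cadence enforced by drain_fifo() in BCH mode
 * (illustrative numbers): a 2080-byte chunk (2048 bytes of data plus
 * 32 spare bytes) is drained as 520 32-bit reads, with the RDDREQ
 * poll performed after every batch of 8 reads (32 bytes) except the
 * final one.
 */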

static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}

static void pxa3xx_nand_data_dma_irq(void *data)
{
	struct pxa3xx_nand_info *info = data;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
	if (likely(status == DMA_COMPLETE)) {
		info->state = STATE_DMA_DONE;
	} else {
		dev_err(&info->pdev->dev, "DMA error on data channel\n");
		info->retcode = ERR_DMABUSERR;
	}
	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
	enable_int(info, NDCR_INT_MASK);
}

static void start_data_dma(struct pxa3xx_nand_info *info)
{
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor *tx;

	switch (info->state) {
	case STATE_DMA_WRITING:
		info->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		break;
	case STATE_DMA_READING:
		info->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}
	info->sg.length = info->data_size +
		(info->oob_size ? info->spare_size + info->ecc_size : 0);
	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
		return;
	}
	tx->callback = pxa3xx_nand_data_dma_irq;
	tx->callback_param = info;
	info->dma_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(info->dma_chan);
	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
		__func__, direction, info->dma_cookie, info->sg.length);
}

static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}
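
/*
 * Interrupt handling is split in two: pxa3xx_nand_irq() below runs in
 * hard-IRQ context and either kicks off a DMA transfer or returns
 * IRQ_WAKE_THREAD for PIO, in which case pxa3xx_nand_irq_thread()
 * above drains/fills the FIFO in thread context before marking the
 * command done.
 */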

static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether to use DMA to transfer the data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bits before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ.
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes directly
		 * to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored,
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < PAGE_CHUNK_SIZE) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}
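
/*
 * Worked example for set_command_address() with a large-page device
 * (page_size >= PAGE_CHUNK_SIZE): column 0 and page_addr 0x12345 give
 * ndcb1 = (0x2345 << 16) | 0x0000 and ndcb2 = 0x01, i.e. the low 16
 * bits of the page address share NDCB1 with the column, and the third
 * row-address byte goes into NDCB2.
 */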

static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column pointers to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->oob_size		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}

static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a
			 * chunked page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->data_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}
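
/*
 * prepare_set_command() only assembles the NDCB0-NDCB3 values in
 * struct pxa3xx_nand_info; nothing touches the hardware here. The
 * command buffer itself is written to the controller from
 * pxa3xx_nand_irq() once NDSR_WRCMDREQ is raised after
 * pxa3xx_nand_start() sets ND_RUN.
 */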

static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed and,
	 * if so, reload the timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}

static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed and,
	 * if so, reload the timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete it.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
2012-06-25 18:07:45 +08:00
|
|
|
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
|
2015-10-13 17:22:18 +08:00
|
|
|
struct nand_chip *chip, const uint8_t *buf, int oob_required,
|
|
|
|
int page)
|
2011-02-28 10:32:11 +08:00
|
|
|
{
|
|
|
|
chip->write_buf(mtd, buf, mtd->writesize);
|
|
|
|
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
|
2012-06-25 18:07:45 +08:00
|
|
|
|
|
|
|
return 0;
|
2011-02-28 10:32:11 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
|
2012-05-03 01:14:55 +08:00
|
|
|
struct nand_chip *chip, uint8_t *buf, int oob_required,
|
|
|
|
int page)
|
2011-02-28 10:32:11 +08:00
|
|
|
{
|
2011-07-15 11:44:32 +08:00
|
|
|
struct pxa3xx_nand_host *host = mtd->priv;
|
|
|
|
struct pxa3xx_nand_info *info = host->info_data;
|
2011-02-28 10:32:11 +08:00
|
|
|
|
|
|
|
chip->read_buf(mtd, buf, mtd->writesize);
|
|
|
|
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
|
|
|
|
|
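/*
 * The interrupt handler recorded the controller's ECC result for this
 * page in info->retcode and info->ecc_err_cnt; report it to the MTD
 * ECC statistics here.
 */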
2013-11-15 05:25:39 +08:00
|
|
|
if (info->retcode == ERR_CORERR && info->use_ecc) {
|
|
|
|
mtd->ecc_stats.corrected += info->ecc_err_cnt;
|
|
|
|
|
|
|
|
} else if (info->retcode == ERR_UNCORERR) {
|
2011-02-28 10:32:11 +08:00
|
|
|
/*
|
|
|
|
* For a blank page (all 0xff), the HW will calculate its ECC as
|
|
|
|
* 0, which differs from the ECC information within the
|
2013-11-15 05:25:39 +08:00
|
|
|
* OOB area, so ignore such uncorrectable errors
|
2011-02-28 10:32:11 +08:00
|
|
|
*/
|
|
|
|
if (is_buf_blank(buf, mtd->writesize))
|
2011-06-07 18:01:07 +08:00
|
|
|
info->retcode = ERR_NONE;
|
|
|
|
else
|
2011-02-28 10:32:11 +08:00
|
|
|
mtd->ecc_stats.failed++;
|
2008-02-14 15:48:23 +08:00
|
|
|
}
|
2011-02-28 10:32:11 +08:00
|
|
|
|
2013-11-15 05:25:39 +08:00
|
|
|
return info->max_bitflips;
|
2008-02-14 15:48:23 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
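/*
 * Reads below are served from info->data_buff, which was filled while
 * the previous command executed; buf_start tracks the read position.
 */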
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
|
|
|
|
{
|
2011-07-15 11:44:32 +08:00
|
|
|
struct pxa3xx_nand_host *host = mtd->priv;
|
|
|
|
struct pxa3xx_nand_info *info = host->info_data;
|
2008-02-14 15:48:23 +08:00
|
|
|
char retval = 0xFF;
|
|
|
|
|
|
|
|
if (info->buf_start < info->buf_count)
|
|
|
|
/* Has a new command just been sent? */
|
|
|
|
retval = info->data_buff[info->buf_start++];
|
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
|
|
|
|
{
|
2011-07-15 11:44:32 +08:00
|
|
|
struct pxa3xx_nand_host *host = mtd->priv;
|
|
|
|
struct pxa3xx_nand_info *info = host->info_data;
|
2008-02-14 15:48:23 +08:00
|
|
|
u16 retval = 0xFFFF;
|
|
|
|
|
|
|
|
if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
|
|
|
|
retval = *((u16 *)(info->data_buff+info->buf_start));
|
|
|
|
info->buf_start += 2;
|
|
|
|
}
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
|
|
|
|
{
|
2011-07-15 11:44:32 +08:00
|
|
|
struct pxa3xx_nand_host *host = mtd->priv;
|
|
|
|
struct pxa3xx_nand_info *info = host->info_data;
|
2008-02-14 15:48:23 +08:00
|
|
|
int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
|
|
|
|
|
|
|
|
memcpy(buf, info->data_buff + info->buf_start, real_len);
|
|
|
|
info->buf_start += real_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
|
|
|
|
const uint8_t *buf, int len)
|
|
|
|
{
|
2011-07-15 11:44:32 +08:00
|
|
|
struct pxa3xx_nand_host *host = mtd->priv;
|
|
|
|
struct pxa3xx_nand_info *info = host->info_data;
|
2008-02-14 15:48:23 +08:00
|
|
|
int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
|
|
|
|
|
|
|
|
memcpy(info->data_buff + info->buf_start, buf, real_len);
|
|
|
|
info->buf_start += real_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
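/*
 * Chip selection is handled per command via info->cs in cmdfunc,
 * so there is nothing to do here.
 */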
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
|
|
|
|
{
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
|
|
|
|
{
|
2011-07-15 11:44:32 +08:00
|
|
|
struct pxa3xx_nand_host *host = mtd->priv;
|
|
|
|
struct pxa3xx_nand_info *info = host->info_data;
|
2013-11-15 05:25:26 +08:00
|
|
|
|
|
|
|
if (info->need_wait) {
|
|
|
|
info->need_wait = 0;
|
2015-02-02 00:55:37 +08:00
|
|
|
if (!wait_for_completion_timeout(&info->dev_ready,
|
|
|
|
CHIP_DELAY_TIMEOUT)) {
|
2013-11-15 05:25:26 +08:00
|
|
|
dev_err(&info->pdev->dev, "Ready time out!!!\n");
|
|
|
|
return NAND_STATUS_FAIL;
|
|
|
|
}
|
|
|
|
}
|
2008-02-14 15:48:23 +08:00
|
|
|
|
|
|
|
/* pxa3xx_nand_send_command has waited for command complete */
|
|
|
|
if (this->state == FL_WRITING || this->state == FL_ERASING) {
|
|
|
|
if (info->retcode == ERR_NONE)
|
|
|
|
return 0;
|
2013-11-15 05:25:26 +08:00
|
|
|
else
|
|
|
|
return NAND_STATUS_FAIL;
|
2008-02-14 15:48:23 +08:00
|
|
|
}
|
|
|
|
|
2013-11-15 05:25:26 +08:00
|
|
|
return NAND_STATUS_READY;
|
2008-02-14 15:48:23 +08:00
|
|
|
}
|
|
|
|
|
2015-11-05 00:13:42 +08:00
|
|
|
static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
|
2008-02-14 15:48:23 +08:00
|
|
|
{
|
|
|
|
struct platform_device *pdev = info->pdev;
|
2013-07-30 16:18:33 +08:00
|
|
|
struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2015-11-05 00:13:42 +08:00
|
|
|
/* Configure default flash values */
|
|
|
|
info->chunk_size = PAGE_CHUNK_SIZE;
|
2015-10-21 16:29:03 +08:00
|
|
|
info->reg_ndcr = 0x0; /* enable all interrupts */
|
|
|
|
info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
|
|
|
|
info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
|
2015-11-05 00:13:42 +08:00
|
|
|
info->reg_ndcr |= NDCR_SPARE_EN;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
|
|
|
|
{
|
|
|
|
struct pxa3xx_nand_host *host = info->host[info->cs];
|
|
|
|
struct mtd_info *mtd = host->mtd;
|
|
|
|
struct nand_chip *chip = mtd->priv;
|
|
|
|
|
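/*
 * Fill in the geometry-dependent NDCR bits now that the chip has been
 * identified.
 */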
2015-10-21 16:29:03 +08:00
|
|
|
info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
|
|
|
|
info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
|
|
|
|
info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
|
2008-02-14 15:48:23 +08:00
|
|
|
}
|
|
|
|
|
2009-02-17 19:54:47 +08:00
|
|
|
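/*
 * With "keep config" the NDCR and timing registers programmed by the
 * bootloader are inherited instead of being reprogrammed.
 */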
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
|
|
|
|
{
|
2015-11-05 00:13:42 +08:00
|
|
|
struct platform_device *pdev = info->pdev;
|
|
|
|
struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
|
2009-02-17 19:54:47 +08:00
|
|
|
uint32_t ndcr = nand_readl(info, NDCR);
|
|
|
|
|
2013-11-15 05:25:37 +08:00
|
|
|
/* Set an initial chunk size */
|
2015-08-20 06:40:09 +08:00
|
|
|
info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
|
2015-09-29 04:56:51 +08:00
|
|
|
info->reg_ndcr = ndcr &
|
|
|
|
~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
|
2015-11-05 00:13:42 +08:00
|
|
|
info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
|
2013-08-13 01:14:55 +08:00
|
|
|
info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
|
|
|
|
info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
|
2009-02-17 19:54:47 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-02-14 15:48:23 +08:00
|
|
|
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
|
|
|
|
{
|
|
|
|
struct platform_device *pdev = info->pdev;
|
2015-09-06 21:12:47 +08:00
|
|
|
struct dma_slave_config config;
|
|
|
|
dma_cap_mask_t mask;
|
|
|
|
struct pxad_param param;
|
|
|
|
int ret;
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2015-09-06 21:12:47 +08:00
|
|
|
info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
|
|
|
|
if (info->data_buff == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
if (use_dma == 0)
|
2008-02-14 15:48:23 +08:00
|
|
|
return 0;
|
|
|
|
|
2015-09-06 21:12:47 +08:00
|
|
|
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2015-09-06 21:12:47 +08:00
|
|
|
sg_init_one(&info->sg, info->data_buff, info->buf_size);
|
|
|
|
dma_cap_zero(mask);
|
|
|
|
dma_cap_set(DMA_SLAVE, mask);
|
|
|
|
param.prio = PXAD_PRIO_LOWEST;
|
|
|
|
param.drcmr = info->drcmr_dat;
|
|
|
|
info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
|
|
|
|
&param, &pdev->dev,
|
|
|
|
"data");
|
|
|
|
if (!info->dma_chan) {
|
|
|
|
dev_err(&pdev->dev, "unable to request data dma channel\n");
|
|
|
|
return -ENODEV;
|
|
|
|
}
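/* Both DMA directions target the controller's NDDB data FIFO register. */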
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2015-09-06 21:12:47 +08:00
|
|
|
memset(&config, 0, sizeof(config));
|
|
|
|
config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
|
|
|
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
|
|
|
config.src_addr = info->mmio_phys + NDDB;
|
|
|
|
config.dst_addr = info->mmio_phys + NDDB;
|
|
|
|
config.src_maxburst = 32;
|
|
|
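/*
 * ECC is computed and checked by the controller while the page is
 * transferred, so the page hooks below only move data through the
 * driver buffer.
 */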
|
config.dst_maxburst = 32;
|
|
|
|
ret = dmaengine_slave_config(info->dma_chan, &config);
|
|
|
|
if (ret < 0) {
|
|
|
|
dev_err(&info->pdev->dev,
|
|
|
|
"dma channel configuration failed: %d\n",
|
|
|
|
ret);
|
|
|
|
return ret;
|
2008-02-14 15:48:23 +08:00
|
|
|
}
|
|
|
|
|
2013-10-05 02:30:37 +08:00
|
|
|
/*
|
|
|
|
* Now that the DMA buffers are allocated, we turn on
|
|
|
|
* DMA proper for I/O operations.
|
|
|
|
*/
|
|
|
|
info->use_dma = 1;
|
2008-02-14 15:48:23 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-04-18 00:38:14 +08:00
|
|
|
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
|
|
|
|
{
|
2013-12-10 20:57:15 +08:00
|
|
|
if (info->use_dma) {
|
2015-09-06 21:12:47 +08:00
|
|
|
dmaengine_terminate_all(info->dma_chan);
|
|
|
|
dma_release_channel(info->dma_chan);
|
2013-04-18 00:38:14 +08:00
|
|
|
}
|
2013-08-13 01:14:56 +08:00
|
|
|
kfree(info->data_buff);
|
|
|
|
}
|
2013-04-18 00:38:14 +08:00
|
|
|
|
2015-10-21 16:29:03 +08:00
|
|
|
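/*
 * Probe for a chip on the current chip select: apply safe ONFI mode 0
 * timings and issue a RESET; a failing status means no device.
 */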
static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
|
2011-02-28 10:32:14 +08:00
|
|
|
{
|
2015-10-21 16:29:03 +08:00
|
|
|
struct pxa3xx_nand_info *info = host->info_data;
|
2011-07-15 11:44:33 +08:00
|
|
|
struct mtd_info *mtd;
|
2013-11-07 23:17:15 +08:00
|
|
|
struct nand_chip *chip;
|
2015-10-21 16:29:03 +08:00
|
|
|
const struct nand_sdr_timings *timings;
|
2011-07-15 11:44:32 +08:00
|
|
|
int ret;
|
2013-11-07 23:17:15 +08:00
|
|
|
|
2011-07-15 11:44:33 +08:00
|
|
|
mtd = info->host[info->cs]->mtd;
|
2013-11-07 23:17:15 +08:00
|
|
|
chip = mtd->priv;
|
|
|
|
|
2011-02-28 10:32:14 +08:00
|
|
|
/* use the common ONFI mode 0 timings for a first try */
|
2015-10-21 16:29:03 +08:00
|
|
|
timings = onfi_async_timing_mode_to_sdr_timings(0);
|
|
|
|
if (IS_ERR(timings))
|
|
|
|
return PTR_ERR(timings);
|
|
|
|
|
|
|
|
pxa3xx_nand_set_sdr_timing(host, timings);
|
2011-07-15 11:44:32 +08:00
|
|
|
|
2013-11-07 23:17:15 +08:00
|
|
|
chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
|
2013-11-15 05:25:27 +08:00
|
|
|
ret = chip->waitfunc(mtd, chip);
|
|
|
|
if (ret & NAND_STATUS_FAIL)
|
|
|
|
return -ENODEV;
|
2011-07-15 11:44:32 +08:00
|
|
|
|
2013-11-15 05:25:27 +08:00
|
|
|
return 0;
|
2011-02-28 10:32:14 +08:00
|
|
|
}
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2013-11-15 05:25:29 +08:00
|
|
|
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
|
|
|
|
struct nand_ecc_ctrl *ecc,
|
2013-12-19 05:44:10 +08:00
|
|
|
int strength, int ecc_stepsize, int page_size)
|
2013-11-15 05:25:29 +08:00
|
|
|
{
|
2013-12-19 05:44:10 +08:00
|
|
|
if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
|
2013-11-15 05:25:37 +08:00
|
|
|
info->chunk_size = 2048;
|
2013-11-15 05:25:29 +08:00
|
|
|
info->spare_size = 40;
|
|
|
|
info->ecc_size = 24;
|
|
|
|
ecc->mode = NAND_ECC_HW;
|
|
|
|
ecc->size = 512;
|
|
|
|
ecc->strength = 1;
|
|
|
|
|
2013-12-19 05:44:10 +08:00
|
|
|
} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
|
2013-11-15 05:25:37 +08:00
|
|
|
info->chunk_size = 512;
|
2013-11-15 05:25:29 +08:00
|
|
|
info->spare_size = 8;
|
|
|
|
info->ecc_size = 8;
|
|
|
|
ecc->mode = NAND_ECC_HW;
|
|
|
|
ecc->size = 512;
|
|
|
|
ecc->strength = 1;
|
|
|
|
|
2013-11-15 06:41:32 +08:00
|
|
|
/*
|
|
|
|
* Required ECC: 4-bit correction per 512 bytes
|
|
|
|
* Select: 16-bit correction per 2048 bytes
|
|
|
|
*/
|
2014-01-13 22:35:38 +08:00
|
|
|
} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
|
|
|
|
info->ecc_bch = 1;
|
|
|
|
info->chunk_size = 2048;
|
|
|
|
info->spare_size = 32;
|
|
|
|
info->ecc_size = 32;
|
|
|
|
ecc->mode = NAND_ECC_HW;
|
|
|
|
ecc->size = info->chunk_size;
|
|
|
|
ecc->layout = &ecc_layout_2KB_bch4bit;
|
|
|
|
ecc->strength = 16;
|
|
|
|
|
2013-12-19 05:44:10 +08:00
|
|
|
} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
|
2013-11-15 05:25:37 +08:00
|
|
|
info->ecc_bch = 1;
|
|
|
|
info->chunk_size = 2048;
|
|
|
|
info->spare_size = 32;
|
|
|
|
info->ecc_size = 32;
|
|
|
|
ecc->mode = NAND_ECC_HW;
|
|
|
|
ecc->size = info->chunk_size;
|
|
|
|
ecc->layout = &ecc_layout_4KB_bch4bit;
|
|
|
|
ecc->strength = 16;
|
|
|
|
|
2013-11-15 06:41:32 +08:00
|
|
|
/*
|
|
|
|
* Required ECC: 8-bit correction per 512 bytes
|
|
|
|
* Select: 16-bit correction per 1024 bytes
|
|
|
|
*/
|
|
|
|
} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
|
2013-11-15 05:25:37 +08:00
|
|
|
info->ecc_bch = 1;
|
|
|
|
info->chunk_size = 1024;
|
|
|
|
info->spare_size = 0;
|
|
|
|
info->ecc_size = 32;
|
|
|
|
ecc->mode = NAND_ECC_HW;
|
|
|
|
ecc->size = info->chunk_size;
|
|
|
|
ecc->layout = &ecc_layout_4KB_bch8bit;
|
|
|
|
ecc->strength = 16;
|
2014-05-15 01:58:07 +08:00
|
|
|
} else {
|
|
|
|
dev_err(&info->pdev->dev,
|
|
|
|
"ECC strength %d at page size %d is not supported\n",
|
|
|
|
strength, page_size);
|
|
|
|
return -ENODEV;
|
2013-11-15 05:25:37 +08:00
|
|
|
}
|
2014-05-15 01:58:07 +08:00
|
|
|
|
|
|
|
dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
|
|
|
|
ecc->strength, ecc->size);
|
2013-11-15 05:25:29 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-02-28 10:32:14 +08:00
|
|
|
static int pxa3xx_nand_scan(struct mtd_info *mtd)
|
2008-02-14 15:48:23 +08:00
|
|
|
{
|
2011-07-15 11:44:32 +08:00
|
|
|
struct pxa3xx_nand_host *host = mtd->priv;
|
|
|
|
struct pxa3xx_nand_info *info = host->info_data;
|
2011-02-28 10:32:14 +08:00
|
|
|
struct platform_device *pdev = info->pdev;
|
2013-07-30 16:18:33 +08:00
|
|
|
struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
|
2011-02-28 10:32:14 +08:00
|
|
|
struct nand_chip *chip = mtd->priv;
|
2015-10-21 16:29:03 +08:00
|
|
|
int ret;
|
2013-12-19 05:44:10 +08:00
|
|
|
uint16_t ecc_strength, ecc_step;
|
2011-02-28 10:32:14 +08:00
|
|
|
|
|
|
|
if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
|
2011-03-03 11:27:01 +08:00
|
|
|
goto KEEP_CONFIG;
|
2011-02-28 10:32:14 +08:00
|
|
|
|
2015-11-05 00:13:42 +08:00
|
|
|
ret = pxa3xx_nand_config_ident(info);
|
2015-10-21 16:29:03 +08:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ret = pxa3xx_nand_sensing(host);
|
2011-07-15 11:44:32 +08:00
|
|
|
if (ret) {
|
2011-07-15 11:44:33 +08:00
|
|
|
dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
|
|
|
|
info->cs);
|
2011-02-28 10:32:14 +08:00
|
|
|
|
2011-07-15 11:44:32 +08:00
|
|
|
return ret;
|
2011-02-28 10:32:14 +08:00
|
|
|
}
|
|
|
|
|
2011-03-03 11:27:01 +08:00
|
|
|
KEEP_CONFIG:
|
2013-08-13 01:14:55 +08:00
|
|
|
if (info->reg_ndcr & NDCR_DWIDTH_M)
|
2011-07-15 11:44:32 +08:00
|
|
|
chip->options |= NAND_BUSWIDTH_16;
|
|
|
|
|
2013-11-15 05:25:29 +08:00
|
|
|
/* Device detection must be done with ECC disabled */
|
|
|
|
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
|
|
|
|
nand_writel(info, NDECCCTRL, 0x0);
|
|
|
|
|
2015-10-21 16:29:03 +08:00
|
|
|
if (nand_scan_ident(mtd, 1, NULL))
|
2011-03-03 11:27:01 +08:00
|
|
|
return -ENODEV;
|
2013-11-15 05:25:28 +08:00
|
|
|
|
2015-10-21 16:29:03 +08:00
|
|
|
if (!pdata->keep_config) {
|
|
|
|
ret = pxa3xx_nand_init(host);
|
|
|
|
if (ret) {
|
|
|
|
dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
|
|
|
|
ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-11-15 05:25:28 +08:00
|
|
|
if (pdata->flash_bbt) {
|
|
|
|
/*
|
|
|
|
* We'll use a bad block table stored in-flash and don't
|
|
|
|
* allow writing the bad block marker to the flash.
|
|
|
|
*/
|
|
|
|
chip->bbt_options |= NAND_BBT_USE_FLASH |
|
|
|
|
NAND_BBT_NO_OOB_BBM;
|
|
|
|
chip->bbt_td = &bbt_main_descr;
|
|
|
|
chip->bbt_md = &bbt_mirror_descr;
|
|
|
|
}
|
|
|
|
|
mtd: nand: pxa3xx: Use extended cmdfunc() only if needed
Currently, we have two different cmdfunc implementations:
one for the PXA3xx SoC variant and one for the Armada 370/XP SoC variant.
The former is the legacy one, typically constrained to devices
with page sizes smaller than or equal to the controller's FIFO buffer.
On the other hand, the latter _only_ supports the so-called extended
command semantics, which allow handling devices with larger
page sizes (4 KiB, 8 KiB, ...).
This means we currently don't support devices with smaller pages on the
A370/XP SoC. Fix it by first renaming the cmdfunc variants, and then
making the choice based on device page size (and SoC variant), rather than
on SoC variant alone.
While at it, add a check for page size, to make sure we don't allow larger
page sizes on the PXA3xx variant.
Tested-by: Arnaud Ebalard <arno@natisbad.org>
Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
2013-12-19 05:44:09 +08:00
|
|
|
/*
|
|
|
|
* If the page size is bigger than the FIFO size, let's check
|
|
|
|
* we are given the right variant and then switch to the extended
|
|
|
|
* (aka split) command handling.
|
|
|
|
*/
|
|
|
|
if (mtd->writesize > PAGE_CHUNK_SIZE) {
|
|
|
|
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
|
|
|
|
chip->cmdfunc = nand_cmdfunc_extended;
|
|
|
|
} else {
|
|
|
|
dev_err(&info->pdev->dev,
|
|
|
|
"unsupported page size on this variant\n");
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-15 01:58:08 +08:00
|
|
|
if (pdata->ecc_strength && pdata->ecc_step_size) {
|
|
|
|
ecc_strength = pdata->ecc_strength;
|
|
|
|
ecc_step = pdata->ecc_step_size;
|
|
|
|
} else {
|
|
|
|
ecc_strength = chip->ecc_strength_ds;
|
|
|
|
ecc_step = chip->ecc_step_ds;
|
|
|
|
}
|
2013-12-19 05:44:10 +08:00
|
|
|
|
|
|
|
/* Set default ECC strength requirements on non-ONFI devices */
|
|
|
|
if (ecc_strength < 1 && ecc_step < 1) {
|
|
|
|
ecc_strength = 1;
|
|
|
|
ecc_step = 512;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
|
|
|
|
ecc_step, mtd->writesize);
|
2014-05-15 01:58:07 +08:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2013-11-15 05:25:29 +08:00
|
|
|
|
2011-03-03 11:27:01 +08:00
|
|
|
/* calculate addressing information */
|
2011-07-15 11:44:32 +08:00
|
|
|
if (mtd->writesize >= 2048)
|
|
|
|
host->col_addr_cycles = 2;
|
|
|
|
else
|
|
|
|
host->col_addr_cycles = 1;
|
|
|
|
|
2013-10-05 02:30:38 +08:00
|
|
|
/* release the initial buffer */
|
|
|
|
kfree(info->data_buff);
|
|
|
|
|
|
|
|
/* allocate the real data + oob buffer */
|
|
|
|
info->buf_size = mtd->writesize + mtd->oobsize;
|
|
|
|
ret = pxa3xx_nand_init_buff(info);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2011-03-03 11:27:01 +08:00
|
|
|
info->oob_buff = info->data_buff + mtd->writesize;
|
2013-10-05 02:30:38 +08:00
|
|
|
|
2011-03-03 11:27:01 +08:00
|
|
|
if ((mtd->size >> chip->page_shift) > 65536)
|
2011-07-15 11:44:32 +08:00
|
|
|
host->row_addr_cycles = 3;
|
2011-03-03 11:27:01 +08:00
|
|
|
else
|
2011-07-15 11:44:32 +08:00
|
|
|
host->row_addr_cycles = 2;
|
2015-11-05 00:13:42 +08:00
|
|
|
|
|
|
|
if (!pdata->keep_config)
|
|
|
|
pxa3xx_nand_config_tail(info);
|
|
|
|
|
2011-02-28 10:32:14 +08:00
|
|
|
return nand_scan_tail(mtd);
|
2008-02-14 15:48:23 +08:00
|
|
|
}
|
|
|
|
|
2011-07-15 11:44:32 +08:00
|
|
|
static int alloc_nand_resource(struct platform_device *pdev)
|
2008-02-14 15:48:23 +08:00
|
|
|
{
|
2015-10-31 11:33:25 +08:00
|
|
|
struct device_node *np = pdev->dev.of_node;
|
2011-07-15 11:44:33 +08:00
|
|
|
struct pxa3xx_nand_platform_data *pdata;
|
2008-02-14 15:48:23 +08:00
|
|
|
struct pxa3xx_nand_info *info;
|
2011-07-15 11:44:32 +08:00
|
|
|
struct pxa3xx_nand_host *host;
|
2012-08-20 13:40:31 +08:00
|
|
|
struct nand_chip *chip = NULL;
|
2008-02-14 15:48:23 +08:00
|
|
|
struct mtd_info *mtd;
|
|
|
|
struct resource *r;
|
2011-07-15 11:44:33 +08:00
|
|
|
int ret, irq, cs;
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2013-07-30 16:18:33 +08:00
|
|
|
pdata = dev_get_platdata(&pdev->dev);
|
2015-02-09 04:02:09 +08:00
|
|
|
if (pdata->num_cs <= 0)
|
|
|
|
return -ENODEV;
|
2013-04-18 00:38:09 +08:00
|
|
|
info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
|
|
|
|
sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
|
|
|
|
if (!info)
|
2011-07-15 11:44:32 +08:00
|
|
|
return -ENOMEM;
|
2008-02-14 15:48:23 +08:00
|
|
|
|
|
|
|
info->pdev = pdev;
|
2013-11-07 23:17:14 +08:00
|
|
|
info->variant = pxa3xx_nand_get_variant(pdev);
|
2011-07-15 11:44:33 +08:00
|
|
|
for (cs = 0; cs < pdata->num_cs; cs++) {
|
2015-05-01 04:17:47 +08:00
|
|
|
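/*
 * mtd_info, nand_chip and host for each chip select are carved out of
 * the single allocation made above; chip and host alias the same
 * memory (the host structure starts with the nand_chip).
 */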
mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
|
2011-07-15 11:44:33 +08:00
|
|
|
chip = (struct nand_chip *)(&mtd[1]);
|
|
|
|
host = (struct pxa3xx_nand_host *)chip;
|
|
|
|
info->host[cs] = host;
|
|
|
|
host->mtd = mtd;
|
|
|
|
host->cs = cs;
|
|
|
|
host->info_data = info;
|
|
|
|
mtd->priv = host;
|
2015-06-11 04:39:01 +08:00
|
|
|
mtd->dev.parent = &pdev->dev;
|
2015-10-31 11:33:25 +08:00
|
|
|
/* FIXME: all chips use the same device tree partitions */
|
|
|
|
nand_set_flash_node(chip, np);
|
2011-07-15 11:44:33 +08:00
|
|
|
|
|
|
|
chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
|
|
|
|
chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
|
|
|
|
chip->controller = &info->controller;
|
|
|
|
chip->waitfunc = pxa3xx_nand_waitfunc;
|
|
|
|
chip->select_chip = pxa3xx_nand_select_chip;
|
|
|
|
chip->read_word = pxa3xx_nand_read_word;
|
|
|
|
chip->read_byte = pxa3xx_nand_read_byte;
|
|
|
|
chip->read_buf = pxa3xx_nand_read_buf;
|
|
|
|
chip->write_buf = pxa3xx_nand_write_buf;
|
2013-11-07 23:17:12 +08:00
|
|
|
chip->options |= NAND_NO_SUBPAGE_WRITE;
|
2013-12-19 05:44:09 +08:00
|
|
|
chip->cmdfunc = nand_cmdfunc;
|
2011-07-15 11:44:33 +08:00
|
|
|
}
|
2011-02-28 10:32:14 +08:00
|
|
|
|
|
|
|
spin_lock_init(&chip->controller->lock);
|
|
|
|
init_waitqueue_head(&chip->controller->wq);
|
2013-04-18 00:38:11 +08:00
|
|
|
info->clk = devm_clk_get(&pdev->dev, NULL);
|
2008-02-14 15:48:23 +08:00
|
|
|
if (IS_ERR(info->clk)) {
|
|
|
|
dev_err(&pdev->dev, "failed to get nand clock\n");
|
2013-04-18 00:38:09 +08:00
|
|
|
return PTR_ERR(info->clk);
|
2008-02-14 15:48:23 +08:00
|
|
|
}
|
2013-04-18 00:38:13 +08:00
|
|
|
ret = clk_prepare_enable(info->clk);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2013-08-13 01:14:58 +08:00
|
|
|
if (use_dma) {
|
2015-09-06 21:12:47 +08:00
|
|
|
r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
|
|
|
|
if (r == NULL) {
|
|
|
|
dev_err(&pdev->dev,
|
|
|
|
"no resource defined for data DMA\n");
|
|
|
|
ret = -ENXIO;
|
|
|
|
goto fail_disable_clk;
|
2012-07-23 01:51:02 +08:00
|
|
|
}
|
2015-09-06 21:12:47 +08:00
|
|
|
info->drcmr_dat = r->start;
|
|
|
|
|
|
|
|
r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
|
|
|
|
if (r == NULL) {
|
|
|
|
dev_err(&pdev->dev,
|
|
|
|
"no resource defined for cmd DMA\n");
|
|
|
|
ret = -ENXIO;
|
|
|
|
goto fail_disable_clk;
|
|
|
|
}
|
|
|
|
info->drcmr_cmd = r->start;
|
2008-02-14 15:48:23 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
irq = platform_get_irq(pdev, 0);
|
|
|
|
if (irq < 0) {
|
|
|
|
dev_err(&pdev->dev, "no IRQ resource defined\n");
|
|
|
|
ret = -ENXIO;
|
2013-04-18 00:38:11 +08:00
|
|
|
goto fail_disable_clk;
|
2008-02-14 15:48:23 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
2013-04-18 00:38:10 +08:00
|
|
|
info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
|
|
|
|
if (IS_ERR(info->mmio_base)) {
|
|
|
|
ret = PTR_ERR(info->mmio_base);
|
2013-04-18 00:38:11 +08:00
|
|
|
goto fail_disable_clk;
|
2008-02-14 15:48:23 +08:00
|
|
|
}
|
2009-09-10 14:11:44 +08:00
|
|
|
info->mmio_phys = r->start;
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2013-10-05 02:30:38 +08:00
|
|
|
/* Allocate a buffer to allow flash detection */
|
|
|
|
info->buf_size = INIT_BUFFER_SIZE;
|
|
|
|
info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
|
|
|
|
if (info->data_buff == NULL) {
|
|
|
|
ret = -ENOMEM;
|
2013-04-18 00:38:11 +08:00
|
|
|
goto fail_disable_clk;
|
2013-10-05 02:30:38 +08:00
|
|
|
}
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2009-09-10 14:27:23 +08:00
|
|
|
/* initialize all interrupts to be disabled */
|
|
|
|
disable_int(info, NDSR_MASK);
|
|
|
|
|
2015-02-21 02:36:43 +08:00
|
|
|
ret = request_threaded_irq(irq, pxa3xx_nand_irq,
|
|
|
|
pxa3xx_nand_irq_thread, IRQF_ONESHOT,
|
|
|
|
pdev->name, info);
|
2008-02-14 15:48:23 +08:00
|
|
|
if (ret < 0) {
|
|
|
|
dev_err(&pdev->dev, "failed to request IRQ\n");
|
|
|
|
goto fail_free_buf;
|
|
|
|
}
|
|
|
|
|
2011-03-03 11:08:30 +08:00
|
|
|
platform_set_drvdata(pdev, info);
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2011-07-15 11:44:32 +08:00
|
|
|
return 0;
|
2008-02-14 15:48:23 +08:00
|
|
|
|
|
|
|
fail_free_buf:
|
2011-02-28 10:32:14 +08:00
|
|
|
free_irq(irq, info);
|
2013-10-05 02:30:38 +08:00
|
|
|
kfree(info->data_buff);
|
2013-04-18 00:38:11 +08:00
|
|
|
fail_disable_clk:
|
2013-04-18 00:38:12 +08:00
|
|
|
clk_disable_unprepare(info->clk);
|
2011-07-15 11:44:32 +08:00
|
|
|
return ret;
|
2008-02-14 15:48:23 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int pxa3xx_nand_remove(struct platform_device *pdev)
|
|
|
|
{
|
2011-03-03 11:08:30 +08:00
|
|
|
struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
|
2011-07-15 11:44:33 +08:00
|
|
|
struct pxa3xx_nand_platform_data *pdata;
|
|
|
|
int irq, cs;
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2011-07-15 11:44:32 +08:00
|
|
|
if (!info)
|
|
|
|
return 0;
|
|
|
|
|
2013-07-30 16:18:33 +08:00
|
|
|
pdata = dev_get_platdata(&pdev->dev);
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2009-09-10 14:22:55 +08:00
|
|
|
irq = platform_get_irq(pdev, 0);
|
|
|
|
if (irq >= 0)
|
|
|
|
free_irq(irq, info);
|
2013-04-18 00:38:14 +08:00
|
|
|
pxa3xx_nand_free_buff(info);
|
2009-02-17 19:54:46 +08:00
|
|
|
|
2015-09-29 04:56:51 +08:00
|
|
|
/*
|
|
|
|
* In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
|
|
|
|
* In order to prevent a lockup of the system bus, the DFI bus
|
|
|
|
* arbitration is granted to SMC upon driver removal. This is done by
|
|
|
|
* setting the x_ARB_CNTL bit, which also prevents the NAND from having
|
|
|
|
* access to the bus anymore.
|
|
|
|
*/
|
|
|
|
nand_writel(info, NDCR,
|
|
|
|
(nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
|
|
|
|
NFCV1_NDCR_ARB_CNTL);
|
2013-04-18 00:38:12 +08:00
|
|
|
clk_disable_unprepare(info->clk);
|
2009-02-17 19:54:46 +08:00
|
|
|
|
2011-07-15 11:44:33 +08:00
|
|
|
for (cs = 0; cs < pdata->num_cs; cs++)
|
|
|
|
nand_release(info->host[cs]->mtd);
|
2008-02-14 15:48:23 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-07-23 01:51:02 +08:00
|
|
|
static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
|
|
|
|
{
|
|
|
|
struct pxa3xx_nand_platform_data *pdata;
|
|
|
|
struct device_node *np = pdev->dev.of_node;
|
|
|
|
const struct of_device_id *of_id =
|
|
|
|
of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
|
|
|
|
|
|
|
|
if (!of_id)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
|
|
|
|
if (!pdata)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
|
|
|
|
pdata->enable_arbiter = 1;
|
|
|
|
if (of_get_property(np, "marvell,nand-keep-config", NULL))
|
|
|
|
pdata->keep_config = 1;
|
|
|
|
of_property_read_u32(np, "num-cs", &pdata->num_cs);
|
2013-11-15 05:25:28 +08:00
|
|
|
pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
|
2012-07-23 01:51:02 +08:00
|
|
|
|
2014-05-15 01:58:08 +08:00
|
|
|
pdata->ecc_strength = of_get_nand_ecc_strength(np);
|
|
|
|
if (pdata->ecc_strength < 0)
|
|
|
|
pdata->ecc_strength = 0;
|
|
|
|
|
|
|
|
pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
|
|
|
|
if (pdata->ecc_step_size < 0)
|
|
|
|
pdata->ecc_step_size = 0;
|
|
|
|
|
2012-07-23 01:51:02 +08:00
|
|
|
pdev->dev.platform_data = pdata;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-03-03 11:08:30 +08:00
|
|
|
static int pxa3xx_nand_probe(struct platform_device *pdev)
|
|
|
|
{
|
|
|
|
struct pxa3xx_nand_platform_data *pdata;
|
|
|
|
struct pxa3xx_nand_info *info;
|
2015-09-06 21:12:47 +08:00
|
|
|
int ret, cs, probe_success, dma_available;
|
2011-03-03 11:08:30 +08:00
|
|
|
|
2015-09-06 21:12:47 +08:00
|
|
|
dma_available = IS_ENABLED(CONFIG_ARM) &&
|
|
|
|
(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
|
|
|
|
if (use_dma && !dma_available) {
|
2013-08-13 01:14:56 +08:00
|
|
|
use_dma = 0;
|
|
|
|
dev_warn(&pdev->dev,
|
|
|
|
"This platform can't do DMA on this device\n");
|
|
|
|
}
|
2015-09-06 21:12:47 +08:00
|
|
|
|
2012-07-23 01:51:02 +08:00
|
|
|
ret = pxa3xx_nand_probe_dt(pdev);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2013-07-30 16:18:33 +08:00
|
|
|
pdata = dev_get_platdata(&pdev->dev);
|
2011-03-03 11:08:30 +08:00
|
|
|
if (!pdata) {
|
|
|
|
dev_err(&pdev->dev, "no platform data defined\n");
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
2011-07-15 11:44:32 +08:00
|
|
|
ret = alloc_nand_resource(pdev);
|
|
|
|
if (ret) {
|
|
|
|
dev_err(&pdev->dev, "alloc nand resource failed\n");
|
|
|
|
return ret;
|
|
|
|
}
|
2011-03-03 11:08:30 +08:00
|
|
|
|
2011-07-15 11:44:32 +08:00
|
|
|
info = platform_get_drvdata(pdev);
|
2011-07-15 11:44:33 +08:00
|
|
|
probe_success = 0;
|
|
|
|
for (cs = 0; cs < pdata->num_cs; cs++) {
|
2013-08-13 01:14:52 +08:00
|
|
|
struct mtd_info *mtd = info->host[cs]->mtd;
|
2013-08-13 01:14:53 +08:00
|
|
|
|
2013-10-20 05:19:25 +08:00
|
|
|
/*
|
|
|
|
* The mtd name matches the one used in the 'mtdparts' kernel
|
|
|
|
* parameter. This name cannot be changed, otherwise the
|
|
|
|
* user's mtd partition configuration would break.
|
|
|
|
*/
|
|
|
|
mtd->name = "pxa3xx_nand-0";
|
2011-07-15 11:44:33 +08:00
|
|
|
info->cs = cs;
|
2013-08-13 01:14:52 +08:00
|
|
|
ret = pxa3xx_nand_scan(mtd);
|
2011-07-15 11:44:33 +08:00
|
|
|
if (ret) {
|
|
|
|
dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
|
|
|
|
cs);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2015-10-31 11:33:25 +08:00
|
|
|
ret = mtd_device_register(mtd, pdata->parts[cs],
|
|
|
|
pdata->nr_parts[cs]);
|
2011-07-15 11:44:33 +08:00
|
|
|
if (!ret)
|
|
|
|
probe_success = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!probe_success) {
|
2011-03-03 11:08:30 +08:00
|
|
|
pxa3xx_nand_remove(pdev);
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
2011-07-15 11:44:33 +08:00
|
|
|
return 0;
|
2011-03-03 11:08:30 +08:00
|
|
|
}
|
|
|
|
|
2008-02-14 15:48:23 +08:00
|
|
|
#ifdef CONFIG_PM
|
2015-10-13 05:07:41 +08:00
|
|
|
static int pxa3xx_nand_suspend(struct device *dev)
|
2008-02-14 15:48:23 +08:00
|
|
|
{
|
2015-10-13 05:07:41 +08:00
|
|
|
struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2011-02-28 10:32:11 +08:00
|
|
|
if (info->state) {
|
2015-10-13 05:07:41 +08:00
|
|
|
dev_err(dev, "driver busy, state = %d\n", info->state);
|
2008-02-14 15:48:23 +08:00
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-10-13 05:07:41 +08:00
|
|
|
static int pxa3xx_nand_resume(struct device *dev)
|
2008-02-14 15:48:23 +08:00
|
|
|
{
|
2015-10-13 05:07:41 +08:00
|
|
|
struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
|
2011-07-15 11:44:30 +08:00
|
|
|
|
|
|
|
/* We don't want to handle interrupts without calling the mtd routines */
|
|
|
|
disable_int(info, NDCR_INT_MASK);
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2011-07-15 11:44:33 +08:00
|
|
|
/*
|
|
|
|
* Directly set the chip select to an invalid value,
|
|
|
|
* so that the driver will reset the timings according
|
|
|
|
* to the current chip select at the beginning of cmdfunc.
|
|
|
|
*/
|
|
|
|
info->cs = 0xff;
|
2008-02-14 15:48:23 +08:00
|
|
|
|
2011-07-15 11:44:30 +08:00
|
|
|
/*
|
|
|
|
* As the spec says, the NDSR would be updated to 0x1800 when
|
|
|
|
* doing the nand_clk disable/enable.
|
|
|
|
* To prevent it from damaging the driver's state machine, clear
|
|
|
|
* all status bits before resuming.
|
|
|
|
*/
|
|
|
|
nand_writel(info, NDSR, NDSR_MASK);
|
2011-07-15 11:44:33 +08:00
|
|
|
|
2010-08-17 17:25:57 +08:00
|
|
|
return 0;
|
2008-02-14 15:48:23 +08:00
|
|
|
}
|
|
|
|
#else
|
|
|
|
#define pxa3xx_nand_suspend NULL
|
|
|
|
#define pxa3xx_nand_resume NULL
|
|
|
|
#endif
|
|
|
|
|
2015-10-13 05:07:41 +08:00
|
|
|
static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
|
|
|
|
.suspend = pxa3xx_nand_suspend,
|
|
|
|
.resume = pxa3xx_nand_resume,
|
|
|
|
};
|
|
|
|
|
2008-02-14 15:48:23 +08:00
|
|
|
static struct platform_driver pxa3xx_nand_driver = {
|
|
|
|
.driver = {
|
|
|
|
.name = "pxa3xx-nand",
|
2013-09-30 17:40:24 +08:00
|
|
|
.of_match_table = pxa3xx_nand_dt_ids,
|
2015-10-13 05:07:41 +08:00
|
|
|
.pm = &pxa3xx_nand_pm_ops,
|
2008-02-14 15:48:23 +08:00
|
|
|
},
|
|
|
|
.probe = pxa3xx_nand_probe,
|
|
|
|
.remove = pxa3xx_nand_remove,
|
|
|
|
};
|
|
|
|
|
2011-11-27 20:45:03 +08:00
|
|
|
module_platform_driver(pxa3xx_nand_driver);
|
2008-02-14 15:48:23 +08:00
|
|
|
|
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
MODULE_DESCRIPTION("PXA3xx NAND controller driver");
|