// SPDX-License-Identifier: GPL-2.0
/*
 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
 *
 * Copyright (C) 2005, Intec Automation Inc.
 * Copyright (C) 2014, Freescale Semiconductor, Inc.
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/math64.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/of_platform.h>
#include <linux/sched/task_stack.h>
#include <linux/spi/flash.h>
#include <linux/mtd/spi-nor.h>

#include "core.h"

/* Define max times to check status register before we give up. */

/*
 * For everything but full-chip erase; probably could be much smaller, but kept
 * around for safety for now
 */
#define DEFAULT_READY_WAIT_JIFFIES		(40UL * HZ)

/*
 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
 * for larger flash
 */
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES	(40UL * HZ)

#define SPI_NOR_MAX_ADDR_WIDTH	4

#define SPI_NOR_SRST_SLEEP_MIN 200
#define SPI_NOR_SRST_SLEEP_MAX 400

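/*
 * Note: the full-chip erase timeout is scaled at run time -- the erase path
 * (not shown in this excerpt) multiplies CHIP_ERASE_2MB_READY_WAIT_JIFFIES by
 * the flash size in 2MB units, so e.g. a 16MB part waits up to 8 * 40s = 320s.
 */
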
/**
 * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
 *			   extension type.
 * @nor:		pointer to a 'struct spi_nor'
 * @op:			pointer to the 'struct spi_mem_op' whose properties
 *			need to be initialized.
 *
 * Right now, only "repeat" and "invert" are supported.
 *
 * Return: The opcode extension.
 */
static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
			      const struct spi_mem_op *op)
{
	switch (nor->cmd_ext_type) {
	case SPI_NOR_EXT_INVERT:
		return ~op->cmd.opcode;

	case SPI_NOR_EXT_REPEAT:
		return op->cmd.opcode;

	default:
		dev_err(nor->dev, "Unknown command extension type\n");
		return 0;
	}
}

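/*
 * Example (illustrative): with SPI_NOR_EXT_INVERT, a Read Status (05h)
 * command goes out as the two-byte sequence 05h FAh, whereas
 * SPI_NOR_EXT_REPEAT sends 05h 05h.
 */
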
/**
 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
 * @nor:		pointer to a 'struct spi_nor'
 * @op:			pointer to the 'struct spi_mem_op' whose properties
 *			need to be initialized.
 * @proto:		the protocol from which the properties need to be set.
 */
void spi_nor_spimem_setup_op(const struct spi_nor *nor,
			     struct spi_mem_op *op,
			     const enum spi_nor_protocol proto)
{
	u8 ext;

	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);

	if (op->addr.nbytes)
		op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	if (op->dummy.nbytes)
		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	if (op->data.nbytes)
		op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);

	if (spi_nor_protocol_is_dtr(proto)) {
		/*
		 * SPIMEM supports mixed DTR modes, but right now we can only
		 * have all phases either DTR or STR. IOW, SPIMEM can have
		 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
		 * phases to either DTR or STR.
		 */
		op->cmd.dtr = true;
		op->addr.dtr = true;
		op->dummy.dtr = true;
		op->data.dtr = true;

		/* 2 bytes per clock cycle in DTR mode. */
		op->dummy.nbytes *= 2;

		ext = spi_nor_get_cmd_ext(nor, op);
		op->cmd.opcode = (op->cmd.opcode << 8) | ext;
		op->cmd.nbytes = 2;
	}
}

/**
 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
 *                           transfer
 * @nor:        pointer to 'struct spi_nor'
 * @op:         pointer to 'struct spi_mem_op' template for transfer
 *
 * If we have to use the bounce buffer, the data field in @op will be updated.
 *
 * Return: true if the bounce buffer is needed, false if not
 */
static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
{
	/* op->data.buf.in occupies the same memory as op->data.buf.out */
	if (object_is_on_stack(op->data.buf.in) ||
	    !virt_addr_valid(op->data.buf.in)) {
		if (op->data.nbytes > nor->bouncebuf_size)
			op->data.nbytes = nor->bouncebuf_size;
		op->data.buf.in = nor->bouncebuf;
		return true;
	}

	return false;
}

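/*
 * The check above guards against buffers that are not safe for DMA: data that
 * lives on the kernel stack or in a vmalloc'ed region fails the tests, so it
 * is staged through the pre-allocated, DMA-able nor->bouncebuf instead.
 */
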
/**
 * spi_nor_spimem_exec_op() - execute a memory operation
 * @nor:        pointer to 'struct spi_nor'
 * @op:         pointer to 'struct spi_mem_op' template for transfer
 *
 * Return: 0 on success, -error otherwise.
 */
static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
{
	int error;

	error = spi_mem_adjust_op_size(nor->spimem, op);
	if (error)
		return error;

	return spi_mem_exec_op(nor->spimem, op);
}

static int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
					   u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->read_reg(nor, opcode, buf, len);
}

static int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
					    const u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->write_reg(nor, opcode, buf, len);
}

static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
	if (spi_nor_protocol_is_dtr(nor->write_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->erase(nor, offs);
}

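/*
 * The DTR checks above exist because the legacy controller_ops interface has
 * no notion of the two-byte opcodes or DTR timing used in 8D-8D-8D mode, so
 * such requests are rejected rather than sent out malformed.
 */
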
/**
 * spi_nor_spimem_read_data() - read data from flash's memory region via
 *                              spi-mem
 * @nor:        pointer to 'struct spi_nor'
 * @from:       offset to read from
 * @len:        number of bytes to read
 * @buf:        pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_width, from, 0),
			   SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
			   SPI_MEM_OP_DATA_IN(len, buf, 0));
	bool usebouncebuf;
	ssize_t nbytes;
	int error;

	spi_nor_spimem_setup_op(nor, &op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	usebouncebuf = spi_nor_spimem_bounce(nor, &op);

	if (nor->dirmap.rdesc) {
		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
					     op.data.nbytes, op.data.buf.in);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	if (usebouncebuf && nbytes > 0)
		memcpy(buf, op.data.buf.in, nbytes);

	return nbytes;
}

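/*
 * Worked example of the dummy-cycle conversion above: a read that needs 8
 * dummy clock cycles on a x4 bus becomes (8 * 4) / 8 = 4 dummy bytes, and
 * 8 bytes if the protocol is DTR, since data toggles on both clock edges.
 */
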
/**
 * spi_nor_read_data() - read data from flash memory
 * @nor:        pointer to 'struct spi_nor'
 * @from:       offset to read from
 * @len:        number of bytes to read
 * @buf:        pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_read_data(nor, from, len, buf);

	return nor->controller_ops->read(nor, from, len, buf);
}

/**
 * spi_nor_spimem_write_data() - write data to flash memory via
 *                               spi-mem
 * @nor:        pointer to 'struct spi_nor'
 * @to:         offset to write to
 * @len:        number of bytes to write
 * @buf:        pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_width, to, 0),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(len, buf, 0));
	ssize_t nbytes;
	int error;

	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op.addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

	if (spi_nor_spimem_bounce(nor, &op))
		memcpy(nor->bouncebuf, buf, op.data.nbytes);

	if (nor->dirmap.wdesc) {
		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
					      op.data.nbytes, op.data.buf.out);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	return nbytes;
}

/**
 * spi_nor_write_data() - write data to flash memory
 * @nor:        pointer to 'struct spi_nor'
 * @to:         offset to write to
 * @len:        number of bytes to write
 * @buf:        pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
			   const u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_write_data(nor, to, len, buf);

	return nor->controller_ops->write(nor, to, len, buf);
}

/**
 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_enable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Enable\n", ret);

	return ret;
}

/**
 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_disable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Disable\n", ret);

	return ret;
}

/**
 * spi_nor_read_sr() - Read the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *		Status Register will be written. Should be at least 2 bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr, 0));

		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR\n", ret);

	return ret;
}

/**
 * spi_nor_read_fsr() - Read the Flag Status Register.
 * @nor:	pointer to 'struct spi_nor'
 * @fsr:	pointer to a DMA-able buffer where the value of the
 *		Flag Status Register will be written. Should be at least 2
 *		bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, fsr, 0));

		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading FSR\n", ret);

	return ret;
}

/**
 * spi_nor_read_cr() - Read the Configuration Register using the
 * SPINOR_OP_RDCR (35h) command.
 * @nor:	pointer to 'struct spi_nor'
 * @cr:		pointer to a DMA-able buffer where the value of the
 *		Configuration Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, cr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading CR\n", ret);

	return ret;
}

/**
 * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
						  SPINOR_OP_EN4B :
						  SPINOR_OP_EX4B,
						  0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       enable ? SPINOR_OP_EN4B :
								SPINOR_OP_EX4B,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}

/**
 * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
 * flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	int ret;

	nor->bouncebuf[0] = enable << 7;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
						       nor->bouncebuf, 1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}

/**
 * spi_nor_write_ear() - Write Extended Address Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @ear:	value to write to the Extended Address Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
{
	int ret;

	nor->bouncebuf[0] = ear;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREAR,
						       nor->bouncebuf, 1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d writing EAR\n", ret);

	return ret;
}

/**
 * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *		Status Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_XRDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);

	return ret;
}

/**
 * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if
 * the flash is ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_xsr_ready(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_xread_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	return !!(nor->bouncebuf[0] & XSR_RDY);
}

/**
 * spi_nor_clear_sr() - Clear the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 */
static void spi_nor_clear_sr(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d clearing SR\n", ret);
}

/**
 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
 * for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_sr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	if (nor->flags & SNOR_F_USE_CLSR &&
	    nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
		if (nor->bouncebuf[0] & SR_E_ERR)
			dev_err(nor->dev, "Erase Error occurred\n");
		else
			dev_err(nor->dev, "Programming Error occurred\n");

		spi_nor_clear_sr(nor);

		/*
		 * WEL bit remains set to one when an erase or page program
		 * error occurs. Issue a Write Disable command to protect
		 * against inadvertent writes that can possibly corrupt the
		 * contents of the memory.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	return !(nor->bouncebuf[0] & SR_WIP);
}

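/*
 * SR_WIP (bit 0 of the Status Register) is set by the flash while a program
 * or erase is in progress, so "ready" above is simply its negation.
 */
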
/**
 * spi_nor_clear_fsr() - Clear the Flag Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 */
static void spi_nor_clear_fsr(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
}

/**
 * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
 * ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_fsr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
		if (nor->bouncebuf[0] & FSR_E_ERR)
			dev_err(nor->dev, "Erase operation failed.\n");
		else
			dev_err(nor->dev, "Program operation failed.\n");

		if (nor->bouncebuf[0] & FSR_PT_ERR)
			dev_err(nor->dev,
				"Attempted to modify a protected sector.\n");

		spi_nor_clear_fsr(nor);

		/*
		 * WEL bit remains set to one when an erase or page program
		 * error occurs. Issue a Write Disable command to protect
		 * against inadvertent writes that can possibly corrupt the
		 * contents of the memory.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	return !!(nor->bouncebuf[0] & FSR_READY);
}

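/*
 * Unlike SR_WIP, the Flag Status Register's ready bit has positive polarity:
 * FSR_READY is set once the device is idle, hence the !! above rather than a
 * negation.
 */
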
2019-11-02 19:23:37 +08:00
|
|
|
/**
 * spi_nor_ready() - Query the flash to see if it is ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_ready(struct spi_nor *nor)
{
	int sr, fsr;

	if (nor->flags & SNOR_F_READY_XSR_RDY)
		sr = spi_nor_xsr_ready(nor);
	else
		sr = spi_nor_sr_ready(nor);
	if (sr < 0)
		return sr;
	fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
	if (fsr < 0)
		return fsr;
	return sr && fsr;
}

2019-11-02 19:23:37 +08:00
|
|
|
/**
 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
 * Status Register until ready, or timeout occurs.
 * @nor:		pointer to "struct spi_nor".
 * @timeout_jiffies:	jiffies to wait until timeout.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		cond_resched();
	}

	dev_dbg(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}

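/*
 * Note that the loop above polls with cond_resched() rather than sleeping, so
 * it yields the CPU between Status Register reads, and it always performs one
 * final readiness check after the deadline has passed before giving up.
 */
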
2019-11-02 19:23:37 +08:00
|
|
|
/**
 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
 * flash to be ready, or timeout occurs.
 * @nor:	pointer to "struct spi_nor".
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_wait_till_ready(struct spi_nor *nor)
{
	return spi_nor_wait_till_ready_with_timeout(nor,
						    DEFAULT_READY_WAIT_JIFFIES);
}

2021-01-21 19:05:45 +08:00
|
|
|
/**
 * spi_nor_global_block_unlock() - Unlock Global Block Protection.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_global_block_unlock(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_GBULK, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
						       NULL, 0);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}

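/*
 * The sequence above mirrors a normal register write: Write Enable first,
 * then the Global Block Unlock opcode, then a wait for the busy flag to
 * clear, since the unlock is an internally timed operation on the flash.
 */
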
2019-11-02 19:23:35 +08:00
|
|
|
/**
 * spi_nor_write_sr() - Write the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to DMA-able buffer to write to the Status Register.
 * @len:	number of bytes to write to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(len, sr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
						       len);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}

2019-11-07 16:41:51 +08:00
|
|
|
/**
 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
 * ensure that the byte written matches the received value.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;

	nor->bouncebuf[0] = sr1;

	ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
	if (ret)
		return ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] != sr1) {
		dev_dbg(nor->dev, "SR1: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
 * Status Register 2 in one shot. Ensure that the byte written in the Status
 * Register 1 matches the received value, and that the 16-bit Write did not
 * affect what was already in the Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register 1.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 cr_written;

	/* Make sure we don't overwrite the contents of Status Register 2. */
	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
		ret = spi_nor_read_cr(nor, &sr_cr[1]);
		if (ret)
			return ret;
	} else if (nor->params->quad_enable) {
		/*
		 * If the Status Register 2 Read command (35h) is not
		 * supported, we should at least be sure we don't
		 * change the value of the SR2 Quad Enable bit.
		 *
		 * We can safely assume that when the Quad Enable method is
		 * set, the value of the QE bit is one, as a consequence of the
		 * nor->params->quad_enable() call.
		 *
		 * We can safely assume that the Quad Enable bit is present in
		 * the Status Register 2 at BIT(1). According to the JESD216
		 * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
		 * Write Status (01h) command is available just for the cases
		 * in which the QE bit is described in SR2 at BIT(1).
		 */
		sr_cr[1] = SR2_QUAD_EN_BIT1;
	} else {
		sr_cr[1] = 0;
	}

	sr_cr[0] = sr1;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	cr_written = sr_cr[1];

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr_written != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}

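/*
 * Layout reminder for the 16-bit write above: sr_cr[0] carries Status
 * Register 1 and sr_cr[1] carries Status Register 2 / the Configuration
 * Register, which is why only sr_cr[1] is read back and compared.
 */
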
2019-11-07 16:42:05 +08:00
|
|
|
/**
|
|
|
|
* spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
|
|
|
|
* Configuration Register in one shot. Ensure that the byte written in the
|
|
|
|
* Configuration Register match the received value, and that the 16-bit Write
|
|
|
|
* did not affect what was already in the Status Register 1.
|
|
|
|
* @nor: pointer to a 'struct spi_nor'.
|
|
|
|
* @cr: byte value to be written to the Configuration Register.
|
|
|
|
*
|
|
|
|
* Return: 0 on success, -errno otherwise.
|
|
|
|
*/
|
2021-03-22 07:51:39 +08:00
|
|
|
int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
|
2019-11-07 16:42:05 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
u8 *sr_cr = nor->bouncebuf;
|
|
|
|
u8 sr_written;
|
|
|
|
|
|
|
|
/* Keep the current value of the Status Register 1. */
|
|
|
|
ret = spi_nor_read_sr(nor, sr_cr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
sr_cr[1] = cr;
|
|
|
|
|
|
|
|
ret = spi_nor_write_sr(nor, sr_cr, 2);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
sr_written = sr_cr[0];
|
|
|
|
|
|
|
|
ret = spi_nor_read_sr(nor, sr_cr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
if (sr_written != sr_cr[0]) {
|
|
|
|
dev_dbg(nor->dev, "SR: Read back test failed\n");
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nor->flags & SNOR_F_NO_READ_CR)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = spi_nor_read_cr(nor, &sr_cr[1]);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
if (cr != sr_cr[1]) {
|
|
|
|
dev_dbg(nor->dev, "CR: read back test failed\n");
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}

/**
 * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
 * the byte written matches the received value without affecting other bits
 * in the Status Registers 1 and 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	if (nor->flags & SNOR_F_HAS_16BIT_SR)
		return spi_nor_write_16bit_sr_and_check(nor, sr1);

	return spi_nor_write_sr1_and_check(nor, sr1);
}
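
/*
 * Illustrative sketch, not part of the original driver: the typical call
 * pattern for spi_nor_write_sr_and_check() is a read-modify-write of Status
 * Register 1. The "mask" parameter below is hypothetical; real callers pass
 * the SR bits they actually want to clear (e.g. block protection bits).
 */
static int __maybe_unused spi_nor_example_clear_sr1_bits(struct spi_nor *nor,
							 u8 mask)
{
	int ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	/* Write the modified value back; the helper re-reads and verifies. */
	return spi_nor_write_sr_and_check(nor, nor->bouncebuf[0] & ~mask);
}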

/**
 * spi_nor_write_sr2() - Write the Status Register 2 using the
 * SPINOR_OP_WRSR2 (3eh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer to write to the Status Register 2.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, sr2, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
						       sr2, 1);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR2\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}

/**
 * spi_nor_read_sr2() - Read the Status Register 2 using the
 * SPINOR_OP_RDSR2 (3fh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer where the value of the
 *		Status Register 2 will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr2, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR2\n", ret);

	return ret;
}

/**
 * spi_nor_erase_chip() - Erase the entire flash memory.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_chip(struct spi_nor *nor)
{
	int ret;

	dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       SPINOR_OP_CHIP_ERASE,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d erasing chip\n", ret);

	return ret;
}

static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == opcode)
			return table[i][1];

	/* No conversion found, keep input op code. */
	return opcode;
}

u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },

		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}
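
/*
 * Illustrative sketch, not part of the original driver: the 3-byte to 4-byte
 * opcode conversion is a plain table lookup, so known opcodes are translated
 * and unknown ones pass through unchanged.
 */
static u8 __maybe_unused spi_nor_example_3to4_read(void)
{
	/*
	 * SPINOR_OP_READ_FAST (0x0b) is found in the table and becomes
	 * SPINOR_OP_READ_FAST_4B (0x0c); an opcode that is not listed
	 * would be returned unchanged.
	 */
	return spi_nor_convert_3to4_read(SPINOR_OP_READ_FAST);
}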

static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}

static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}

static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
	return !!nor->params->erase_map.uniform_erase_type;
}

static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);

	if (!spi_nor_has_uniform_erase(nor)) {
		struct spi_nor_erase_map *map = &nor->params->erase_map;
		struct spi_nor_erase_type *erase;
		int i;

		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
			erase = &map->erase_type[i];
			erase->opcode =
				spi_nor_convert_3to4_erase(erase->opcode);
		}
	}
}

int spi_nor_lock_and_prep(struct spi_nor *nor)
{
	int ret = 0;

	mutex_lock(&nor->lock);

	if (nor->controller_ops && nor->controller_ops->prepare) {
		ret = nor->controller_ops->prepare(nor);
		if (ret) {
			mutex_unlock(&nor->lock);
			return ret;
		}
	}
	return ret;
}

void spi_nor_unlock_and_unprep(struct spi_nor *nor)
{
	if (nor->controller_ops && nor->controller_ops->unprepare)
		nor->controller_ops->unprepare(nor);
	mutex_unlock(&nor->lock);
}
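
/*
 * Illustrative sketch, not part of the original driver: every mtd entry point
 * brackets its flash accesses with the prepare/unprepare pair above, exactly
 * as spi_nor_read(), spi_nor_write() and spi_nor_erase() do later in this
 * file.
 */
static int __maybe_unused spi_nor_example_locked_op(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	/* Issue register accesses or data transfers here. */
	ret = spi_nor_read_sr(nor, nor->bouncebuf);

	spi_nor_unlock_and_unprep(nor);
	return ret;
}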

static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
{
	if (!nor->params->convert_addr)
		return addr;

	return nor->params->convert_addr(nor, addr);
}

/*
 * Initiate the erasure of a single sector
 */
int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	int i;

	addr = spi_nor_convert_addr(nor, addr);

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 0),
				   SPI_MEM_OP_ADDR(nor->addr_width, addr, 0),
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

		return spi_mem_exec_op(nor->spimem, &op);
	} else if (nor->controller_ops->erase) {
		return spi_nor_controller_ops_erase(nor, addr);
	}

	/*
	 * Default implementation, if driver doesn't have a specialized HW
	 * control
	 */
	for (i = nor->addr_width - 1; i >= 0; i--) {
		nor->bouncebuf[i] = addr & 0xff;
		addr >>= 8;
	}

	return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
						nor->bouncebuf, nor->addr_width);
}

/**
 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @dividend:	dividend value
 * @remainder:	pointer to u32 remainder (will be updated)
 *
 * Return: the result of the division
 */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
				     u64 dividend, u32 *remainder)
{
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	*remainder = (u32)dividend & erase->size_mask;
	return dividend >> erase->size_shift;
}
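
/*
 * Illustrative sketch, not part of the original driver: because erase sizes
 * are powers of two, the division above reduces to a shift and the remainder
 * to a mask. For a 64 KiB erase type, size_shift is 16 and size_mask is
 * 0xffff, so an offset of 0x12345 yields quotient 1 and remainder 0x2345.
 */
static u64 __maybe_unused spi_nor_example_div_64k(u64 addr, u32 *rem)
{
	struct spi_nor_erase_type erase = {
		.size = SZ_64K,
		.size_shift = ffs(SZ_64K) - 1,
		.size_mask = SZ_64K - 1,
	};

	return spi_nor_div_by_erase_size(&erase, addr, rem);
}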

/**
 * spi_nor_find_best_erase_type() - find the best erase type for the given
 *				    offset in the serial flash memory and the
 *				    number of bytes to erase. The region in
 *				    which the address fits is expected to be
 *				    provided.
 * @map:	the erase map of the SPI NOR
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Return: a pointer to the best fitted erase type, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;
	u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

	/*
	 * Erase types are ordered by size, with the smallest erase type at
	 * index 0.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Does the erase region support the tested erase type? */
		if (!(erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];

		/* Alignment is not mandatory for overlaid regions */
		if (region->offset & SNOR_OVERLAID_REGION &&
		    region->size <= len)
			return erase;

		/* Don't erase more than what the user has asked for. */
		if (erase->size > len)
			continue;

		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (!rem)
			return erase;
	}

	return NULL;
}

static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
{
	return region->offset & SNOR_LAST_REGION;
}

static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
{
	return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
}

/**
 * spi_nor_region_next() - get the next spi nor region
 * @region:	pointer to a structure that describes a SPI NOR erase region
 *
 * Return: the next spi nor region or NULL if last region.
 */
struct spi_nor_erase_region *
spi_nor_region_next(struct spi_nor_erase_region *region)
{
	if (spi_nor_region_is_last(region))
		return NULL;
	region++;
	return region;
}

/**
 * spi_nor_find_erase_region() - find the region of the serial flash memory in
 *				 which the offset fits
 * @map:	the erase map of the SPI NOR
 * @addr:	offset in the serial flash memory
 *
 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_region *
spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
{
	struct spi_nor_erase_region *region = map->regions;
	u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
	u64 region_end = region_start + region->size;

	while (addr < region_start || addr >= region_end) {
		region = spi_nor_region_next(region);
		if (!region)
			return ERR_PTR(-EINVAL);

		region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
		region_end = region_start + region->size;
	}

	return region;
}

/**
 * spi_nor_init_erase_cmd() - initialize an erase command
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 *
 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
		       const struct spi_nor_erase_type *erase)
{
	struct spi_nor_erase_command *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cmd->list);
	cmd->opcode = erase->opcode;
	cmd->count = 1;

	if (region->offset & SNOR_OVERLAID_REGION)
		cmd->size = region->size;
	else
		cmd->size = erase->size;

	return cmd;
}

/**
 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
 * @erase_list:	list of erase commands
 */
static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
{
	struct spi_nor_erase_command *cmd, *next;

	list_for_each_entry_safe(cmd, next, erase_list, list) {
		list_del(&cmd->list);
		kfree(cmd);
	}
}

/**
 * spi_nor_init_erase_cmd_list() - initialize erase command list
 * @nor:	pointer to a 'struct spi_nor'
 * @erase_list:	list of erase commands to be executed once we validate that the
 *		erase can be performed
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Builds the list of best fitted erase commands and verifies if the erase can
 * be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	int ret = -EINVAL;

	region = spi_nor_find_erase_region(map, addr);
	if (IS_ERR(region))
		return PTR_ERR(region);

	region_end = spi_nor_region_end(region);

	while (len) {
		erase = spi_nor_find_best_erase_type(map, region, addr, len);
		if (!erase)
			goto destroy_erase_cmd_list;

		if (prev_erase != erase ||
		    erase->size != cmd->size ||
		    region->offset & SNOR_OVERLAID_REGION) {
			cmd = spi_nor_init_erase_cmd(region, erase);
			if (IS_ERR(cmd)) {
				ret = PTR_ERR(cmd);
				goto destroy_erase_cmd_list;
			}

			list_add_tail(&cmd->list, erase_list);
		} else {
			cmd->count++;
		}

		addr += cmd->size;
		len -= cmd->size;

		if (len && addr >= region_end) {
			region = spi_nor_region_next(region);
			if (!region)
				goto destroy_erase_cmd_list;
			region_end = spi_nor_region_end(region);
		}

		prev_erase = erase;
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}

/**
 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Build a list of best fitted erase commands and execute it once we validate
 * that the erase can be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
				 cmd->size, cmd->opcode, cmd->count);

			ret = spi_nor_write_enable(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;
		}
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}

/*
 * Erase an address range on the nor chip. The address range may extend
 * across one or more erase sectors. Return an error if there is a problem
 * erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 addr, len;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	/* whole-chip erase? */
	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
		unsigned long timeout;

		ret = spi_nor_write_enable(nor);
		if (ret)
			goto erase_err;

		ret = spi_nor_erase_chip(nor);
		if (ret)
			goto erase_err;

		/*
		 * Scale the timeout linearly with the size of the flash, with
		 * a minimum calibrated to an old 2MB flash. We could try to
		 * pull these from CFI/SFDP, but these values should be good
		 * enough for now.
		 */
		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
			      (unsigned long)(mtd->size / SZ_2M));
		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			goto erase_err;

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			ret = spi_nor_write_enable(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto erase_err;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;
		}

	/* erase multiple sectors */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	ret = spi_nor_write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep(nor);

	return ret;
}
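
/*
 * Illustrative sketch, not part of the original driver: spi_nor_erase() is
 * reached through the generic mtd API. A caller erasing the first two erase
 * blocks would do something like this.
 */
static int __maybe_unused spi_nor_example_erase_two_blocks(struct mtd_info *mtd)
{
	struct erase_info instr = {
		.addr = 0,
		.len = 2 * mtd->erasesize,
	};

	return mtd_erase(mtd, &instr);
}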

/**
 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
 * Register 1.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
		return 0;

	nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;

	return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
}

/**
 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
 * Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
	int ret;

	if (nor->flags & SNOR_F_NO_READ_CR)
		return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);

	ret = spi_nor_read_cr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
		return 0;

	nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;

	return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}

/**
 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Status Register 2.
 *
 * This is one of the procedures to set the QE bit described in the SFDP
 * (JESD216 rev B) specification but no manufacturer using this procedure has
 * been identified yet, hence the name of the function.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
	u8 *sr2 = nor->bouncebuf;
	int ret;
	u8 sr2_written;

	/* Check current Quad Enable bit value. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;
	if (*sr2 & SR2_QUAD_EN_BIT7)
		return 0;

	/* Update the Quad Enable bit. */
	*sr2 |= SR2_QUAD_EN_BIT7;

	ret = spi_nor_write_sr2(nor, sr2);
	if (ret)
		return ret;

	sr2_written = *sr2;

	/* Read back and check it. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;

	if (*sr2 != sr2_written) {
		dev_dbg(nor->dev, "SR2: Read back test failed\n");
		return -EIO;
	}

	return 0;
}

static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_atmel,
	&spi_nor_catalyst,
	&spi_nor_eon,
	&spi_nor_esmt,
	&spi_nor_everspin,
	&spi_nor_fujitsu,
	&spi_nor_gigadevice,
	&spi_nor_intel,
	&spi_nor_issi,
	&spi_nor_macronix,
	&spi_nor_micron,
	&spi_nor_st,
	&spi_nor_spansion,
	&spi_nor_sst,
	&spi_nor_winbond,
	&spi_nor_xilinx,
	&spi_nor_xmc,
};

static const struct flash_info *
spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts,
			  const u8 *id)
{
	unsigned int i;

	for (i = 0; i < nparts; i++) {
		if (parts[i].id_len &&
		    !memcmp(parts[i].id, id, parts[i].id_len))
			return &parts[i];
	}

	return NULL;
}
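
/*
 * Illustrative sketch, not part of the original driver: the lookup above
 * compares the JEDEC ID bytes returned by the flash against each entry's
 * id/id_len. The table and ID bytes below are made-up placeholders, not a
 * real part.
 */
static const struct flash_info * __maybe_unused
spi_nor_example_id_lookup(void)
{
	static const struct flash_info example_parts[] = {
		{
			.name = "example-part",
			.id = { 0x9d, 0x60, 0x19 },
			.id_len = 3,
		},
	};
	static const u8 example_id[SPI_NOR_MAX_ID_LEN] = { 0x9d, 0x60, 0x19 };

	return spi_nor_search_part_by_id(example_parts,
					 ARRAY_SIZE(example_parts),
					 example_id);
}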

static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
	const struct flash_info *info;
	u8 *id = nor->bouncebuf;
	unsigned int i;
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
						    SPI_NOR_MAX_ID_LEN);
	}
	if (ret) {
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
		return ERR_PTR(ret);
	}

	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		info = spi_nor_search_part_by_id(manufacturers[i]->parts,
						 manufacturers[i]->nparts,
						 id);
		if (info) {
			nor->manufacturer = manufacturers[i];
			return info;
		}
	}

	dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
		SPI_NOR_MAX_ID_LEN, id);
	return ERR_PTR(-ENODEV);
}

static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	while (len) {
		loff_t addr = from;

		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret == 0) {
			/* We shouldn't see 0-length reads */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep(nor);
	return ret;
}

/*
 * Write an address range to the nor chip. Data must be written in
 * FLASH_PAGESIZE chunks. The address range may be any size provided
 * it is within the physical boundaries.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
	ssize_t ret;
	u32 page_size = nor->params->page_size;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;

		/*
		 * If page_size is a power of two, the offset can be quickly
		 * calculated with an AND operation. Otherwise we need to do a
		 * modulus operation (more expensive).
		 */
		if (is_power_of_2(page_size)) {
			page_offset = addr & (page_size - 1);
		} else {
			uint64_t aux = addr;

			page_offset = do_div(aux, page_size);
		}
		/* the size of data remaining on the first page */
		page_remain = min_t(size_t, page_size - page_offset, len - i);

		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_write_enable(nor);
		if (ret)
			goto write_err;

		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep(nor);
	return ret;
}
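
/*
 * Illustrative sketch, not part of the original driver: the page_offset
 * computation above splits a write at page boundaries. For a 256-byte page,
 * a write starting at address 0x1f0 may carry at most 0x10 bytes in its
 * first chunk.
 */
static size_t __maybe_unused spi_nor_example_first_chunk(loff_t to, size_t len,
							 u32 page_size)
{
	size_t page_offset;

	if (is_power_of_2(page_size)) {
		page_offset = to & (page_size - 1);
	} else {
		u64 aux = to;

		page_offset = do_div(aux, page_size);
	}

	return min_t(size_t, page_size - page_offset, len);
}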

static int spi_nor_check(struct spi_nor *nor)
{
	if (!nor->dev ||
	    (!nor->spimem && !nor->controller_ops) ||
	    (!nor->spimem && nor->controller_ops &&
	    (!nor->controller_ops->read ||
	     !nor->controller_ops->write ||
	     !nor->controller_ops->read_reg ||
	     !nor->controller_ops->write_reg))) {
		pr_err("spi-nor: please fill all the necessary fields!\n");
		return -EINVAL;
	}

	if (nor->spimem && nor->controller_ops) {
		dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
		return -EINVAL;
	}

	return 0;
}

void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
			  u8 num_mode_clocks,
			  u8 num_wait_states,
			  u8 opcode,
			  enum spi_nor_protocol proto)
{
	read->num_mode_clocks = num_mode_clocks;
	read->num_wait_states = num_wait_states;
	read->opcode = opcode;
	read->proto = proto;
}

void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
			     enum spi_nor_protocol proto)
{
	pp->opcode = opcode;
	pp->proto = proto;
}

static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == (int)hwcaps)
			return table[i][1];

	return -EINVAL;
}

int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
		{ SNOR_HWCAPS_READ_8_8_8_DTR,	SNOR_CMD_READ_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}

static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8_DTR,	SNOR_CMD_PP_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}
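
/*
 * Illustrative sketch, not part of the original driver: the helpers above map
 * a single SNOR_HWCAPS_* bit to an index into params->reads[] or
 * params->page_programs[]. For example, SNOR_HWCAPS_READ_1_1_4 maps to
 * SNOR_CMD_READ_1_1_4 and SNOR_HWCAPS_PP_1_1_4 maps to SNOR_CMD_PP_1_1_4.
 */
static void __maybe_unused spi_nor_example_hwcaps2cmd(int *rdidx, int *ppidx)
{
	*rdidx = spi_nor_hwcaps_read2cmd(SNOR_HWCAPS_READ_1_1_4);
	*ppidx = spi_nor_hwcaps_pp2cmd(SNOR_HWCAPS_PP_1_1_4);
}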

/**
 * spi_nor_spimem_check_op - check if the operation is supported
 *                           by controller
 *@nor:        pointer to a 'struct spi_nor'
 *@op:         pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_op(struct spi_nor *nor,
				   struct spi_mem_op *op)
{
	/*
	 * First test with 4 address bytes. The opcode itself might
	 * be a 3B addressing opcode but we don't care, because
	 * SPI controller implementation should not check the opcode,
	 * but just the sequence.
	 */
	op->addr.nbytes = 4;
	if (!spi_mem_supports_op(nor->spimem, op)) {
		if (nor->params->size > SZ_16M)
			return -EOPNOTSUPP;

		/* If flash size <= 16MB, 3 address bytes are sufficient */
		op->addr.nbytes = 3;
		if (!spi_mem_supports_op(nor->spimem, op))
			return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * spi_nor_spimem_check_readop - check if the read op is supported
 *                               by controller
 *@nor:         pointer to a 'struct spi_nor'
 *@read:        pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
				       const struct spi_nor_read_command *read)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0),
					  SPI_MEM_OP_ADDR(3, 0, 0),
					  SPI_MEM_OP_DUMMY(1, 0),
					  SPI_MEM_OP_DATA_IN(1, NULL, 0));

	spi_nor_spimem_setup_op(nor, &op, read->proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	return spi_nor_spimem_check_op(nor, &op);
}

/**
 * spi_nor_spimem_check_pp - check if the page program op is supported
 *                           by controller
 *@nor:         pointer to a 'struct spi_nor'
 *@pp:          pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
				   const struct spi_nor_pp_command *pp)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0),
					  SPI_MEM_OP_ADDR(3, 0, 0),
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_OUT(1, NULL, 0));

	spi_nor_spimem_setup_op(nor, &op, pp->proto);

	return spi_nor_spimem_check_op(nor, &op);
}

/**
 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
 *                                based on SPI controller capabilities
 * @nor:        pointer to a 'struct spi_nor'
 * @hwcaps:     pointer to resulting capabilities after adjusting
 *              according to controller and flash's capability
 */
static void
spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	unsigned int cap;

	/* X-X-X modes are not supported yet, mask them all. */
	*hwcaps &= ~SNOR_HWCAPS_X_X_X;

	/*
	 * If the reset line is broken, we do not want to enter a stateful
	 * mode.
	 */
	if (nor->flags & SNOR_F_BROKEN_RESET)
		*hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);

	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
		int rdidx, ppidx;

		if (!(*hwcaps & BIT(cap)))
			continue;

		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
		if (rdidx >= 0 &&
		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
			*hwcaps &= ~BIT(cap);

		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
		if (ppidx < 0)
			continue;

		if (spi_nor_spimem_check_pp(nor,
					    &params->page_programs[ppidx]))
			*hwcaps &= ~BIT(cap);
	}
}

/**
 * spi_nor_set_erase_type() - set a SPI NOR erase type
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @size:	the size of the sector/block erased by the erase type
 * @opcode:	the SPI command op code to erase the sector/block
 */
void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
			    u8 opcode)
{
	erase->size = size;
	erase->opcode = opcode;
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	erase->size_shift = ffs(erase->size) - 1;
	erase->size_mask = (1 << erase->size_shift) - 1;
}

/**
 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
 * @map:		the erase map of the SPI NOR
 * @erase_mask:		bitmask encoding erase types that can erase the entire
 *			flash memory
 * @flash_size:		the spi nor flash memory size
 */
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
				    u8 erase_mask, u64 flash_size)
{
	/* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
	map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
				     SNOR_LAST_REGION;
	map->uniform_region.size = flash_size;
	map->regions = &map->uniform_region;
	map->uniform_erase_type = erase_mask;
}
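
/*
 * Illustrative sketch, not part of the original driver: a flash supporting
 * 4 KiB and 64 KiB uniform erases over its whole array would set up its erase
 * map roughly like this (the 16 MiB size is an arbitrary example value).
 */
static void __maybe_unused
spi_nor_example_uniform_map(struct spi_nor_erase_map *map)
{
	spi_nor_set_erase_type(&map->erase_type[0], SZ_4K, SPINOR_OP_BE_4K);
	spi_nor_set_erase_type(&map->erase_type[1], SZ_64K, SPINOR_OP_SE);

	/* Both erase types (bits 0 and 1) can erase the entire device. */
	spi_nor_init_uniform_erase_map(map, BIT(0) | BIT(1), SZ_16M);
}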

int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
			     const struct sfdp_parameter_header *bfpt_header,
			     const struct sfdp_bfpt *bfpt)
{
	int ret;

	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->post_bfpt) {
		ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
							   bfpt);
		if (ret)
			return ret;
	}

	if (nor->info->fixups && nor->info->fixups->post_bfpt)
		return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt);

	return 0;
}

static int spi_nor_select_read(struct spi_nor *nor,
			       u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
	const struct spi_nor_read_command *read;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	read = &nor->params->reads[cmd];
	nor->read_opcode = read->opcode;
	nor->read_proto = read->proto;

	/*
	 * In the SPI NOR framework, we don't need to distinguish between mode
	 * clock cycles and wait state clock cycles.
	 * Indeed, the value of the mode clock cycles is used by a QSPI
	 * flash memory to know whether it should enter or leave its 0-4-4
	 * (Continuous Read / XIP) mode.
	 * eXecution In Place is out of the scope of the mtd sub-system.
	 * Hence we choose to merge both mode and wait state clock cycles
	 * into the so called dummy clock cycles.
	 */
	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
	return 0;
}
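
/*
 * Illustrative sketch, not part of the original driver: spi_nor_select_read()
 * keeps the highest-order capability bit shared by controller and flash. If
 * both support 1-1-4 and plain fast read, fls() picks the 1-1-4 variant.
 */
static int __maybe_unused spi_nor_example_best_read(void)
{
	u32 shared = SNOR_HWCAPS_READ_FAST | SNOR_HWCAPS_READ_1_1_4;
	int best_match = fls(shared & SNOR_HWCAPS_READ_MASK) - 1;

	/* Returns SNOR_CMD_READ_1_1_4 for the capabilities above. */
	return spi_nor_hwcaps_read2cmd(BIT(best_match));
}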

static int spi_nor_select_pp(struct spi_nor *nor,
			     u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
	const struct spi_nor_pp_command *pp;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	pp = &nor->params->page_programs[cmd];
	nor->program_opcode = pp->opcode;
	nor->write_proto = pp->proto;
	return 0;
}

/**
 * spi_nor_select_uniform_erase() - select optimum uniform erase type
 * @map:		the erase map of the SPI NOR
 * @wanted_size:	the erase type size to search for. Contains the value of
 *			info->sector_size or of the "small sector" size in case
 *			CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
 *
 * Once the optimum uniform sector erase command is found, disable all the
 * others.
 *
 * Return: pointer to erase type on success, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
			     const u32 wanted_size)
{
	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
	int i;
	u8 uniform_erase_type = map->uniform_erase_type;

	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (!(uniform_erase_type & BIT(i)))
			continue;

		tested_erase = &map->erase_type[i];

		/*
		 * If the current erase size is the wanted one, stop here:
		 * we have found the right uniform Sector Erase command.
		 */
		if (tested_erase->size == wanted_size) {
			erase = tested_erase;
			break;
		}

		/*
		 * Otherwise, the current erase size is still a valid candidate.
		 * Select the biggest valid candidate.
		 */
		if (!erase && tested_erase->size)
			erase = tested_erase;
			/* keep iterating to find the wanted_size */
	}

	if (!erase)
		return NULL;

	/* Disable all other Sector Erase commands. */
	map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
	map->uniform_erase_type |= BIT(erase - map->erase_type);
	return erase;
}
2019-08-28 18:35:17 +08:00
|
|
|
static int spi_nor_select_erase(struct spi_nor *nor)
|
2019-08-26 03:48:36 +08:00
|
|
|
{
|
2020-03-14 03:42:53 +08:00
|
|
|
struct spi_nor_erase_map *map = &nor->params->erase_map;
|
2019-08-26 03:48:36 +08:00
|
|
|
const struct spi_nor_erase_type *erase = NULL;
|
|
|
|
struct mtd_info *mtd = &nor->mtd;
|
2019-08-28 18:35:17 +08:00
|
|
|
u32 wanted_size = nor->info->sector_size;
|
2019-08-26 03:48:36 +08:00
|
|
|
int i;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The previous implementation handling Sector Erase commands assumed
|
|
|
|
* that the SPI flash memory has an uniform layout then used only one
|
|
|
|
* of the supported erase sizes for all Sector Erase commands.
|
|
|
|
* So to be backward compatible, the new implementation also tries to
|
|
|
|
* manage the SPI flash memory as uniform with a single erase sector
|
|
|
|
* size, when possible.
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
|
|
|
|
/* prefer "small sector" erase if possible */
|
|
|
|
wanted_size = 4096u;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (spi_nor_has_uniform_erase(nor)) {
|
|
|
|
erase = spi_nor_select_uniform_erase(map, wanted_size);
|
|
|
|
if (!erase)
|
|
|
|
return -EINVAL;
|
|
|
|
nor->erase_opcode = erase->opcode;
|
|
|
|
mtd->erasesize = erase->size;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For non-uniform SPI flash memory, set mtd->erasesize to the
|
|
|
|
* maximum erase sector size. No need to set nor->erase_opcode.
|
|
|
|
*/
|
|
|
|
for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
|
|
|
|
if (map->erase_type[i].size) {
|
|
|
|
erase = &map->erase_type[i];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!erase)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
mtd->erasesize = erase->size;
|
|
|
|
return 0;
|
|
|
|
}
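
/**
 * spi_nor_default_setup() - default selection of the flash settings
 * @nor:	pointer to a 'struct spi_nor'
 * @hwcaps:	hardware capabilities advertised by the SPI controller
 *
 * Select the (Fast) Read, Page Program and Sector Erase settings supported
 * by both the SPI controller and the SPI flash memory.
 *
 * Return: 0 on success, -errno otherwise.
 */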
static int spi_nor_default_setup(struct spi_nor *nor,
				 const struct spi_nor_hwcaps *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	u32 ignored_mask, shared_mask;
	int err;

	/*
	 * Keep only the hardware capabilities supported by both the SPI
	 * controller and the SPI flash memory.
	 */
	shared_mask = hwcaps->mask & params->hwcaps.mask;

	if (nor->spimem) {
		/*
		 * When called from spi_nor_probe(), all caps are set and we
		 * need to discard some of them based on what the SPI
		 * controller actually supports (using spi_mem_supports_op()).
		 */
		spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
	} else {
		/*
		 * SPI n-n-n protocols are not supported when the SPI
		 * controller directly implements the spi_nor interface.
		 * Yet another reason to switch to spi-mem.
		 */
		ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
		if (shared_mask & ignored_mask) {
			dev_dbg(nor->dev,
				"SPI n-n-n protocols are not supported.\n");
			shared_mask &= ~ignored_mask;
		}
	}

	/* Select the (Fast) Read command. */
	err = spi_nor_select_read(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select read settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select write settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor);
	if (err) {
		dev_dbg(nor->dev,
			"can't select erase settings supported by both the SPI controller and memory.\n");
		return err;
	}

	return 0;
}
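
/**
 * spi_nor_setup() - set up the flash memory settings
 * @nor:	pointer to a 'struct spi_nor'
 * @hwcaps:	hardware capabilities advertised by the SPI controller
 *
 * Run the ->setup() hook from nor->params, if one is provided.
 *
 * Return: 0 on success, -errno otherwise.
 */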
static int spi_nor_setup(struct spi_nor *nor,
			 const struct spi_nor_hwcaps *hwcaps)
{
	if (!nor->params->setup)
		return 0;

	return nor->params->setup(nor, hwcaps);
}

/**
 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
 * settings based on MFR register and ->default_init() hook.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
{
	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->default_init)
		nor->manufacturer->fixups->default_init(nor);

	if (nor->info->fixups && nor->info->fixups->default_init)
		nor->info->fixups->default_init(nor);
}

/**
 * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
 * based on JESD216 SFDP standard.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The method has a roll-back mechanism: in case the SFDP parsing fails, the
 * legacy flash parameters and settings will be restored.
 */
static void spi_nor_sfdp_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter sfdp_params;

	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));

	if (spi_nor_parse_sfdp(nor)) {
		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
		nor->addr_width = 0;
		nor->flags &= ~SNOR_F_4B_OPCODES;
	}
}

/**
 * spi_nor_info_init_params() - Initialize the flash's parameters and settings
 * based on nor->info data.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_info_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	const struct flash_info *info = nor->info;
	struct device_node *np = spi_nor_get_flash_node(nor);
	const u8 no_sfdp_flags = info->no_sfdp_flags;
	u8 i, erase_mask;

	/* Initialize default flash parameters and settings. */
	params->quad_enable = spi_nor_sr2_bit1_quad_enable;
	params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
	params->setup = spi_nor_default_setup;
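	/* OTP parameters, as declared via the OTP_INFO() flash_info macro. */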
	params->otp.org = &info->otp_org;

	/* Default to 16-bit Write Status (01h) Command */
	nor->flags |= SNOR_F_HAS_16BIT_SR;

	/* Set SPI NOR sizes. */
	params->writesize = 1;
	params->size = (u64)info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	if (!(info->flags & SPI_NOR_NO_FR)) {
		/* Default to Fast Read for DT and non-DT platform devices. */
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

		/* Mask out Fast Read if not requested at DT instantiation. */
		if (np && !of_property_read_bool(np, "m25p,fast-read"))
			params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
	}

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);

	if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (no_sfdp_flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (no_sfdp_flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
					  0, 20, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_8_8_8_DTR);
	}

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
		/*
		 * Since xSPI Page Program opcode is backward compatible with
		 * Legacy SPI, use Legacy SPI opcode there as well.
		 */
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
					SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
	}

	/*
	 * Sector Erase settings. Sort Erase Types in ascending order, with the
	 * smallest erase size starting at BIT(0).
	 */
	erase_mask = 0;
	i = 0;
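	/* PMC flashes use a dedicated opcode for the 4 KiB sector erase. */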
	if (no_sfdp_flags & SECT_4K_PMC) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K_PMC);
		i++;
	} else if (no_sfdp_flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}

/**
 * spi_nor_init_flags() - Initialize NOR flags for settings that are not
 * defined in the JESD216 SFDP standard and thus cannot be retrieved when
 * parsing SFDP.
 * @nor:	pointer to a 'struct spi_nor'
 */
static void spi_nor_init_flags(struct spi_nor *nor)
{
	struct device_node *np = spi_nor_get_flash_node(nor);
	const u16 flags = nor->info->flags;

	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	if (flags & SPI_NOR_SWP_IS_VOLATILE)
		nor->flags |= SNOR_F_SWP_IS_VOLATILE;

	if (flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	if (flags & SPI_NOR_HAS_TB) {
		nor->flags |= SNOR_F_HAS_SR_TB;
		if (flags & SPI_NOR_TB_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
	}

	if (flags & SPI_NOR_4BIT_BP) {
		nor->flags |= SNOR_F_HAS_4BIT_BP;
		if (flags & SPI_NOR_BP3_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
	}

	if (flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;

	if (flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;

	if (flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;

	/*
	 * Make sure the XSR_RDY flag is set before calling
	 * spi_nor_wait_till_ready(). Xilinx S3AN shares its MFR
	 * with Atmel SPI NOR.
	 */
	if (flags & SPI_NOR_XSR_RDY)
		nor->flags |= SNOR_F_READY_XSR_RDY;
}

/**
 * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that can not
 * be discovered by SFDP for this particular flash because the SFDP table that
 * indicates this support is not defined in the flash. In case the table for
 * this support is defined but has wrong values, one should instead use a
 * post_sfdp() hook to set the SNOR_F equivalent flag.
 * @nor:	pointer to a 'struct spi_nor'
 */
static void spi_nor_init_fixup_flags(struct spi_nor *nor)
{
	const u8 fixup_flags = nor->info->fixup_flags;

	if (fixup_flags & SPI_NOR_4B_OPCODES)
		nor->flags |= SNOR_F_4B_OPCODES;

	if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE)
		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
}

/**
 * spi_nor_late_init_params() - Late initialization of default flash parameters.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Used to initialize flash parameters that are not declared in the JESD216
 * SFDP standard, or where SFDP tables are not defined at all.
 * Will replace the spi_nor_manufacturer_init_params() method.
 */
static void spi_nor_late_init_params(struct spi_nor *nor)
{
	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->late_init)
		nor->manufacturer->fixups->late_init(nor);

	if (nor->info->fixups && nor->info->fixups->late_init)
		nor->info->fixups->late_init(nor);

	spi_nor_init_flags(nor);
	spi_nor_init_fixup_flags(nor);

	/*
	 * NOR protection support. When locking_ops are not provided, we pick
	 * the default ones.
	 */
	if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
		spi_nor_init_default_locking_ops(nor);
}

/**
 * spi_nor_init_params() - Initialize the flash's parameters and settings.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The flash parameters and settings are initialized based on a sequence of
 * calls that are ordered by priority:
 *
 * 1/ Default flash parameters initialization. The initializations are done
 *    based on nor->info data:
 *		spi_nor_info_init_params()
 *
 * which can be overwritten by:
 * 2/ Manufacturer flash parameters initialization. The initializations are
 *    done based on MFR register, or when the decisions can not be done solely
 *    based on MFR, by using specific flash_info tweaks, ->default_init():
 *		spi_nor_manufacturer_init_params()
 *
 * which can be overwritten by:
 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
 *    should be more accurate than the above.
 *		spi_nor_sfdp_init_params()
 *
 * Please note that there is a ->post_bfpt() fixup hook that can overwrite
 * the flash parameters and settings immediately after parsing the Basic
 * Flash Parameter Table. spi_nor_post_sfdp_fixups() is called after the SFDP
 * tables are parsed. It is used to tweak various flash parameters when the
 * information provided by the SFDP tables is wrong.
 *
 * which can be overwritten by:
 * 4/ Late flash parameters initialization, used to initialize flash
 *    parameters that are not declared in the JESD216 SFDP standard, or where
 *    SFDP tables are not defined at all.
 *		spi_nor_late_init_params()
 */
static int spi_nor_init_params(struct spi_nor *nor)
{
	nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
	if (!nor->params)
		return -ENOMEM;

	spi_nor_info_init_params(nor);

	spi_nor_manufacturer_init_params(nor);
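
	/*
	 * SFDP is parsed when the flash requests it explicitly
	 * (info->parse_sfdp) or advertises a multi I/O read capability,
	 * unless SPI_NOR_SKIP_SFDP is set.
	 */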
	if ((nor->info->parse_sfdp ||
	     (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ |
					  SPI_NOR_QUAD_READ |
					  SPI_NOR_OCTAL_READ |
					  SPI_NOR_OCTAL_DTR_READ))) &&
	    !(nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP))
		spi_nor_sfdp_init_params(nor);

	spi_nor_late_init_params(nor);

	return 0;
}

/**
 * spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed
 * @nor:	pointer to a 'struct spi_nor'
 * @enable:	whether to enable or disable Octal DTR
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
{
	int ret;

	if (!nor->params->octal_dtr_enable)
		return 0;

	if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
	      nor->write_proto == SNOR_PROTO_8_8_8_DTR))
		return 0;

	if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
		return 0;

	ret = nor->params->octal_dtr_enable(nor, enable);
	if (ret)
		return ret;

	if (enable)
		nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
	else
		nor->reg_proto = SNOR_PROTO_1_1_1;

	return 0;
}

/**
 * spi_nor_quad_enable() - enable Quad I/O if needed.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_quad_enable(struct spi_nor *nor)
{
	if (!nor->params->quad_enable)
		return 0;

	if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
	      spi_nor_get_protocol_width(nor->write_proto) == 4))
		return 0;

	return nor->params->quad_enable(nor);
}
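
/**
 * spi_nor_init() - prepare the flash for regular operation
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Enable the I/O modes selected at setup time (Octal DTR, Quad I/O) and,
 * depending on the kernel configuration, disable the software write
 * protection of the flash memory array.
 *
 * Return: 0 on success, -errno otherwise.
 */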
static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	err = spi_nor_octal_dtr_enable(nor, true);
	if (err) {
		dev_dbg(nor->dev, "octal mode not supported\n");
		return err;
	}

	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_dbg(nor->dev, "quad mode not supported\n");
		return err;
	}

	/*
	 * Some SPI NOR flashes are write protected by default after a power-on
	 * reset cycle, in order to avoid inadvertent writes during power-up.
	 * Backward compatibility imposes unlocking the entire flash memory
	 * array at power-up by default. Depending on the kernel configuration
	 * (1) do nothing, (2) always unlock the entire flash array or (3)
	 * unlock the entire flash array only when the software write
	 * protection bits are volatile. The latter is indicated by
	 * SNOR_F_SWP_IS_VOLATILE.
	 */
	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
	    (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
	     nor->flags & SNOR_F_SWP_IS_VOLATILE))
		spi_nor_try_unlock_all(nor);
mtd: spi-nor: Rework the disabling of block write protection
spi_nor_unlock() unlocks blocks of memory or the entire flash memory
array, if requested. clear_sr_bp() unlocks the entire flash memory
array at boot time. This calls for some unification, clear_sr_bp() is
just an optimization for the case when the unlock request covers the
entire flash size.
Get rid of clear_sr_bp() and introduce spi_nor_unlock_all(), which is
just a call to spi_nor_unlock() for the entire flash memory array.
This fixes a bug that was present in spi_nor_spansion_clear_sr_bp().
When the QE bit was zero, we used the Write Status (01h) command with
one data byte, which might cleared the Status Register 2. We now always
use the Write Status (01h) command with two data bytes when
SNOR_F_HAS_16BIT_SR is set, to avoid clearing the Status Register 2.
The SNOR_F_NO_READ_CR case is treated as well. When the flash doesn't
support the CR Read command, we make an assumption about the value of
the QE bit. In spi_nor_init(), call spi_nor_quad_enable() first, then
spi_nor_unlock_all(), so that by the time spi_nor_unlock_all() runs we can
be sure the QE bit has value one, because of the previous call to
spi_nor_quad_enable().
Get rid of the MFR handling and implement specific manufacturer
default_init() fixup hooks.
Note that this changes the logic a bit for the SNOR_MFR_ATMEL,
SNOR_MFR_INTEL and SNOR_MFR_SST cases. Before this patch, the Atmel,
Intel and SST chips did not set the locking ops, but unlocked the entire
flash at boot time, while now they are setting the locking ops to
stm_locking_ops. This should work, since disabling the block
protection at boot time used the same Status Register bits to unlock
the flash as in the stm_locking_ops case.
Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Tudor Ambarus <tudor.ambarus@microchip.com>
Reviewed-by: Vignesh Raghavendra <vigneshr@ti.com>
2019-11-07 16:41:55 +08:00
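As a companion to the 16-bit Write Status point above, here is a minimal, self-contained sketch (illustrative values and names, not the driver's code path) of composing the two data bytes of a Write Status (01h) command so that Status Register 2, and with it the QE bit, is written back with its current value instead of being clobbered by a one-byte write.

/*
 * Illustrative sketch only: build the two-byte WRSR payload so SR2 (which
 * holds the QE bit on the affected parts) keeps its current value. The
 * register values are made up; only the idea matches the description above.
 */
#include <stdio.h>
#include <stdint.h>

#define SR_BP_MASK ((1u << 2) | (1u << 3) | (1u << 4))  /* BP0..BP2 */

int main(void)
{
        uint8_t sr1 = 0x9c;     /* pretend read-out: SRWD + BP bits set */
        uint8_t sr2 = 0x02;     /* pretend read-out: QE bit set */
        uint8_t payload[2];

        payload[0] = sr1 & (uint8_t)~SR_BP_MASK;        /* unlock: drop only BP0..BP2 */
        payload[1] = sr2;                               /* preserve SR2/QE as read */

        printf("two-byte Write Status payload: 0x%02x 0x%02x\n",
               payload[0], payload[1]);
        return 0;
}

A one-byte Write Status on such parts would implicitly load SR2 with zero, which is precisely the QE-clearing problem the message above describes.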
|
|
|
|
2020-10-05 23:31:26 +08:00
|
|
|
if (nor->addr_width == 4 &&
|
|
|
|
nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
|
|
|
|
!(nor->flags & SNOR_F_4B_OPCODES)) {
|
mtd: spi-nor: only apply reset hacks to broken hardware
Commit 59b356ffd0b0 ("mtd: m25p80: restore the status of SPI flash when
exiting") is the latest from a long history of attempts to add reboot
handling to handle stateful addressing modes on SPI flash. Some prior
mostly-related discussions:
http://lists.infradead.org/pipermail/linux-mtd/2013-March/046343.html
[PATCH 1/3] mtd: m25p80: utilize dedicated 4-byte addressing commands
http://lists.infradead.org/pipermail/barebox/2014-September/020682.html
[RFC] MTD m25p80 3-byte addressing and boot problem
http://lists.infradead.org/pipermail/linux-mtd/2015-February/057683.html
[PATCH 2/2] m25p80: if supported put chip to deep power down if not used
Previously, attempts to add reboot-time software reset handling were
rejected, but the latest attempt was not.
Quick summary of the problem:
Some systems (e.g., boot ROM or bootloader) assume that they can read
initial boot code from their SPI flash using 3-byte addressing. If the
flash is left in 4-byte mode after reset, these systems won't boot. The
above patch provided a shutdown/remove hook to attempt to reset the
addressing mode before we reboot. Notably, this patch misses out on
huge classes of unexpected reboots (e.g., crashes, watchdog resets).
Unfortunately, it is essentially impossible to solve this problem 100%:
if your system doesn't know how to reset the SPI flash to power-on
defaults at initialization time, no amount of software can really rescue
you -- there will always be a chance of some unexpected reset that
leaves your flash in an addressing mode that your boot sequence didn't
expect.
While it is not directly harmful to perform hacks like the
aforementioned commit on all 4-byte addressing flash, a
properly-designed system should not need the hack -- and in fact,
providing this hack may mask the fact that a given system is indeed
broken. So this patch attempts to apply this unsound hack more narrowly,
providing a strong suggestion to developers and system designers that
this is truly a hack. With luck, system designers can catch their errors
early on in their development cycle, rather than applying this hack long
term. But apparently enough systems are out in the wild that we still
have to provide this hack.
Document a new device tree property to denote systems that do not have a
proper hardware (or software) reset mechanism, and apply the hack (with
a loud warning) only in this case; a short sketch of how such a property
maps onto a flag follows this message.
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
Reviewed-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
2018-07-28 02:33:13 +08:00
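For context on the device tree property mentioned above: the binding added by this commit lets a board declare that its flash cannot be reset properly, and the driver folds that into the SNOR_F_BROKEN_RESET flag which gates the warning and the address-mode hack below. The sketch is hedged: the wrapper function is hypothetical and relies on headers this file already includes; only of_property_read_bool() and the "broken-flash-reset" property name come from the kernel and the documented binding.

/*
 * Hedged sketch, not a copy of this driver's probe path: a boolean
 * "broken-flash-reset" property in the flash node is turned into the
 * SNOR_F_BROKEN_RESET flag that gates the reset hack below.
 */
static void example_note_broken_reset(struct spi_nor *nor)
{
        struct device_node *np = spi_nor_get_flash_node(nor);

        if (of_property_read_bool(np, "broken-flash-reset"))
                nor->flags |= SNOR_F_BROKEN_RESET;
}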
|
|
|
/*
|
|
|
|
* If the RESET# pin isn't hooked up properly, or the system
|
|
|
|
* otherwise doesn't perform a reset command in the boot
|
|
|
|
* sequence, it's impossible to 100% protect against unexpected
|
|
|
|
* reboots (e.g., crashes). Warn the user (or hopefully, system
|
|
|
|
* designer) that this is bad.
|
|
|
|
*/
|
|
|
|
WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
|
|
|
|
"enabling reset hack; may not recover from unexpected reboots\n");
|
2020-03-14 03:42:53 +08:00
|
|
|
nor->params->set_4byte_addr_mode(nor, true);
|
2018-07-28 02:33:13 +08:00
|
|
|
}
|
2017-08-23 04:45:21 +08:00
|
|
|
|
2017-04-26 04:08:46 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-05-11 17:39:58 +08:00
|
|
|
/**
|
|
|
|
* spi_nor_soft_reset() - Perform a software reset
|
|
|
|
* @nor: pointer to 'struct spi_nor'
|
|
|
|
*
|
|
|
|
* Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets
|
|
|
|
* the device to its power-on-reset state. This is useful when the software has
|
|
|
|
* made some changes to the device's (volatile) registers and needs to reset it before
|
|
|
|
* shutting down, for example.
|
|
|
|
*
|
|
|
|
* Not every flash supports this sequence. The same set of opcodes might be used
|
|
|
|
* for some other operation on a flash that does not support this. Support for
|
|
|
|
* this sequence can be discovered via SFDP in the BFPT table.
|
|
|
|
*
|
|
|
|
* Return: 0 on success, -errno otherwise.
|
|
|
|
*/
|
2020-10-05 23:31:35 +08:00
|
|
|
static void spi_nor_soft_reset(struct spi_nor *nor)
|
|
|
|
{
|
|
|
|
struct spi_mem_op op;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0),
|
|
|
|
SPI_MEM_OP_NO_DUMMY,
|
|
|
|
SPI_MEM_OP_NO_ADDR,
|
|
|
|
SPI_MEM_OP_NO_DATA);
|
|
|
|
|
|
|
|
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
|
|
|
|
|
|
|
|
ret = spi_mem_exec_op(nor->spimem, &op);
|
|
|
|
if (ret) {
|
|
|
|
dev_warn(nor->dev, "Software reset failed: %d\n", ret);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0),
|
|
|
|
SPI_MEM_OP_NO_DUMMY,
|
|
|
|
SPI_MEM_OP_NO_ADDR,
|
|
|
|
SPI_MEM_OP_NO_DATA);
|
|
|
|
|
|
|
|
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
|
|
|
|
|
|
|
|
ret = spi_mem_exec_op(nor->spimem, &op);
|
|
|
|
if (ret) {
|
|
|
|
dev_warn(nor->dev, "Software reset failed: %d\n", ret);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Software Reset is not instant, and the delay varies from flash to
|
|
|
|
* flash. Looking at a few flashes, most range somewhere below 100
|
|
|
|
* microseconds. So, sleep for a range of 200-400 us.
|
|
|
|
*/
|
|
|
|
usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
|
|
|
|
}
|
|
|
|
|
2020-10-05 23:31:36 +08:00
|
|
|
/* mtd suspend handler */
|
|
|
|
static int spi_nor_suspend(struct mtd_info *mtd)
|
|
|
|
{
|
|
|
|
struct spi_nor *nor = mtd_to_spi_nor(mtd);
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* Disable octal DTR mode if we enabled it. */
|
|
|
|
ret = spi_nor_octal_dtr_enable(nor, false);
|
|
|
|
if (ret)
|
|
|
|
dev_err(nor->dev, "suspend() failed\n");
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-08-23 04:45:22 +08:00
|
|
|
/* mtd resume handler */
|
|
|
|
static void spi_nor_resume(struct mtd_info *mtd)
|
|
|
|
{
|
|
|
|
struct spi_nor *nor = mtd_to_spi_nor(mtd);
|
|
|
|
struct device *dev = nor->dev;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* re-initialize the nor chip */
|
|
|
|
ret = spi_nor_init(nor);
|
|
|
|
if (ret)
|
|
|
|
dev_err(dev, "resume() failed\n");
|
|
|
|
}
|
|
|
|
|
2021-04-01 15:34:46 +08:00
|
|
|
static int spi_nor_get_device(struct mtd_info *mtd)
|
|
|
|
{
|
|
|
|
struct mtd_info *master = mtd_get_master(mtd);
|
|
|
|
struct spi_nor *nor = mtd_to_spi_nor(master);
|
|
|
|
struct device *dev;
|
|
|
|
|
|
|
|
if (nor->spimem)
|
|
|
|
dev = nor->spimem->spi->controller->dev.parent;
|
|
|
|
else
|
|
|
|
dev = nor->dev;
|
|
|
|
|
|
|
|
if (!try_module_get(dev->driver->owner))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void spi_nor_put_device(struct mtd_info *mtd)
|
|
|
|
{
|
|
|
|
struct mtd_info *master = mtd_get_master(mtd);
|
|
|
|
struct spi_nor *nor = mtd_to_spi_nor(master);
|
|
|
|
struct device *dev;
|
|
|
|
|
|
|
|
if (nor->spimem)
|
|
|
|
dev = nor->spimem->spi->controller->dev.parent;
|
|
|
|
else
|
|
|
|
dev = nor->dev;
|
|
|
|
|
|
|
|
module_put(dev->driver->owner);
|
|
|
|
}
|
|
|
|
|
2017-12-06 10:53:41 +08:00
|
|
|
void spi_nor_restore(struct spi_nor *nor)
|
|
|
|
{
|
|
|
|
/* restore the addressing mode */
|
2018-12-06 18:37:34 +08:00
|
|
|
if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
|
|
|
|
nor->flags & SNOR_F_BROKEN_RESET)
|
2020-03-14 03:42:53 +08:00
|
|
|
nor->params->set_4byte_addr_mode(nor, false);
|
2020-10-05 23:31:35 +08:00
|
|
|
|
|
|
|
if (nor->flags & SNOR_F_SOFT_RESET)
|
|
|
|
spi_nor_soft_reset(nor);
|
2017-12-06 10:53:41 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(spi_nor_restore);
|
|
|
|
|
2020-03-14 03:42:39 +08:00
|
|
|
static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
|
|
|
|
const char *name)
|
2018-12-06 18:41:17 +08:00
|
|
|
{
|
2020-03-14 03:42:39 +08:00
|
|
|
unsigned int i, j;
|
2018-12-06 18:41:17 +08:00
|
|
|
|
2020-03-14 03:42:39 +08:00
|
|
|
for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
|
|
|
|
for (j = 0; j < manufacturers[i]->nparts; j++) {
|
|
|
|
if (!strcmp(name, manufacturers[i]->parts[j].name)) {
|
|
|
|
nor->manufacturer = manufacturers[i];
|
|
|
|
return &manufacturers[i]->parts[j];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-06 18:41:17 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-08-24 20:19:22 +08:00
|
|
|
static int spi_nor_set_addr_width(struct spi_nor *nor)
|
|
|
|
{
|
|
|
|
if (nor->addr_width) {
|
|
|
|
/* already configured from SFDP */
|
2020-10-05 23:31:26 +08:00
|
|
|
} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
|
|
|
|
/*
|
|
|
|
* In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
|
|
|
|
* in this protocol an odd address width cannot be used because
|
|
|
|
* then the address phase would only span a cycle and a half.
|
|
|
|
* Half a cycle would be left over. We would then have to start
|
|
|
|
* the dummy phase in the middle of a cycle and so too the data
|
|
|
|
* phase, and we would end the transaction with half a cycle left
|
|
|
|
* over.
|
|
|
|
*
|
|
|
|
* Force all 8D-8D-8D flashes to use an address width of 4 to
|
|
|
|
* avoid this situation.
|
|
|
|
*/
|
|
|
|
nor->addr_width = 4;
|
2019-08-24 20:19:22 +08:00
|
|
|
} else if (nor->info->addr_width) {
|
|
|
|
nor->addr_width = nor->info->addr_width;
|
|
|
|
} else {
|
|
|
|
nor->addr_width = 3;
|
|
|
|
}
|
|
|
|
|
2021-12-07 22:02:42 +08:00
|
|
|
if (nor->addr_width == 3 && nor->params->size > 0x1000000) {
|
2020-10-06 21:23:46 +08:00
|
|
|
/* enable 4-byte addressing if the device exceeds 16MiB */
|
|
|
|
nor->addr_width = 4;
|
|
|
|
}
|
|
|
|
|
2019-08-24 20:19:22 +08:00
|
|
|
if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
|
2019-11-02 19:23:25 +08:00
|
|
|
dev_dbg(nor->dev, "address width is too large: %u\n",
|
2019-08-24 20:19:22 +08:00
|
|
|
nor->addr_width);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set 4byte opcodes when possible. */
|
|
|
|
if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
|
|
|
|
!(nor->flags & SNOR_F_HAS_4BAIT))
|
|
|
|
spi_nor_set_4byte_opcodes(nor);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-08-28 14:08:02 +08:00
|
|
|
static void spi_nor_debugfs_init(struct spi_nor *nor,
|
|
|
|
const struct flash_info *info)
|
|
|
|
{
|
|
|
|
struct mtd_info *mtd = &nor->mtd;
|
|
|
|
|
|
|
|
mtd->dbg.partname = info->name;
|
|
|
|
mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
|
|
|
|
info->id_len, info->id);
|
|
|
|
}
|
|
|
|
|
2019-08-24 20:19:24 +08:00
|
|
|
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
|
|
|
|
const char *name)
|
|
|
|
{
|
|
|
|
const struct flash_info *info = NULL;
|
|
|
|
|
|
|
|
if (name)
|
2020-03-14 03:42:39 +08:00
|
|
|
info = spi_nor_match_id(nor, name);
|
2019-08-24 20:19:24 +08:00
|
|
|
/* Try to auto-detect if chip name wasn't specified or not found */
|
|
|
|
if (!info)
|
|
|
|
info = spi_nor_read_id(nor);
|
|
|
|
if (IS_ERR_OR_NULL(info))
|
|
|
|
return ERR_PTR(-ENOENT);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the caller has specified the name of a flash model that can normally be
|
|
|
|
* detected using JEDEC, let's verify it.
|
|
|
|
*/
|
|
|
|
if (name && info->id_len) {
|
|
|
|
const struct flash_info *jinfo;
|
|
|
|
|
|
|
|
jinfo = spi_nor_read_id(nor);
|
|
|
|
if (IS_ERR(jinfo)) {
|
|
|
|
return jinfo;
|
|
|
|
} else if (jinfo != info) {
|
|
|
|
/*
|
|
|
|
* JEDEC knows better, so overwrite platform ID. We
|
|
|
|
* can't trust partitions any longer, but we'll let
|
|
|
|
* mtd apply them anyway, since some partitions may be
|
|
|
|
* marked read-only, and we don't want to lose that
|
|
|
|
* information, even if it's not 100% accurate.
|
|
|
|
*/
|
|
|
|
dev_warn(nor->dev, "found %s, expected %s\n",
|
|
|
|
jinfo->name, info->name);
|
|
|
|
info = jinfo;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return info;
|
|
|
|
}
|
|
|
|
|
2021-12-07 22:02:43 +08:00
|
|
|
static void spi_nor_set_mtd_info(struct spi_nor *nor)
|
|
|
|
{
|
|
|
|
struct mtd_info *mtd = &nor->mtd;
|
|
|
|
struct device *dev = nor->dev;
|
|
|
|
|
|
|
|
spi_nor_set_mtd_locking_ops(nor);
|
|
|
|
spi_nor_set_mtd_otp_ops(nor);
|
|
|
|
|
|
|
|
mtd->dev.parent = dev;
|
|
|
|
if (!mtd->name)
|
|
|
|
mtd->name = dev_name(dev);
|
|
|
|
mtd->type = MTD_NORFLASH;
|
|
|
|
mtd->flags = MTD_CAP_NORFLASH;
|
|
|
|
if (nor->info->flags & SPI_NOR_NO_ERASE)
|
|
|
|
mtd->flags |= MTD_NO_ERASE;
|
|
|
|
mtd->writesize = nor->params->writesize;
|
|
|
|
mtd->writebufsize = nor->params->page_size;
|
|
|
|
mtd->size = nor->params->size;
|
|
|
|
mtd->_erase = spi_nor_erase;
|
|
|
|
mtd->_read = spi_nor_read;
|
|
|
|
/* Might already be set by some SST flashes. */
|
|
|
|
if (!mtd->_write)
|
|
|
|
mtd->_write = spi_nor_write;
|
|
|
|
mtd->_suspend = spi_nor_suspend;
|
|
|
|
mtd->_resume = spi_nor_resume;
|
|
|
|
mtd->_get_device = spi_nor_get_device;
|
|
|
|
mtd->_put_device = spi_nor_put_device;
|
|
|
|
}
|
|
|
|
|
2017-04-26 04:08:46 +08:00
|
|
|
int spi_nor_scan(struct spi_nor *nor, const char *name,
|
|
|
|
const struct spi_nor_hwcaps *hwcaps)
|
2014-02-24 18:37:37 +08:00
|
|
|
{
|
2019-08-24 20:19:24 +08:00
|
|
|
const struct flash_info *info;
|
2014-02-24 18:37:37 +08:00
|
|
|
struct device *dev = nor->dev;
|
2015-08-14 06:46:05 +08:00
|
|
|
struct mtd_info *mtd = &nor->mtd;
|
2014-02-24 18:37:37 +08:00
|
|
|
int ret;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
ret = spi_nor_check(nor);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2017-04-26 04:08:46 +08:00
|
|
|
/* Reset SPI protocol for all commands. */
|
|
|
|
nor->reg_proto = SNOR_PROTO_1_1_1;
|
|
|
|
nor->read_proto = SNOR_PROTO_1_1_1;
|
|
|
|
nor->write_proto = SNOR_PROTO_1_1_1;
|
|
|
|
|
2019-08-06 13:10:39 +08:00
|
|
|
/*
|
|
|
|
* We need the bounce buffer early to read/write registers when going
|
|
|
|
* through the spi-mem layer (buffers have to be DMA-able).
|
2019-08-06 13:10:40 +08:00
|
|
|
* For spi-mem drivers, we'll reallocate a new buffer if
|
2021-10-30 01:26:12 +08:00
|
|
|
* nor->params->page_size turns out to be greater than PAGE_SIZE (which
|
2019-08-06 13:10:40 +08:00
|
|
|
* shouldn't happen anytime soon since NOR pages are usually less
|
|
|
|
* than 1KB) after spi_nor_scan() returns.
|
2019-08-06 13:10:39 +08:00
|
|
|
*/
|
|
|
|
nor->bouncebuf_size = PAGE_SIZE;
|
|
|
|
nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!nor->bouncebuf)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2019-08-24 20:19:24 +08:00
|
|
|
info = spi_nor_get_flash_info(nor, name);
|
|
|
|
if (IS_ERR(info))
|
|
|
|
return PTR_ERR(info);
|
2014-02-24 18:37:37 +08:00
|
|
|
|
2018-12-06 18:41:18 +08:00
|
|
|
nor->info = info;
|
|
|
|
|
2019-08-28 14:08:02 +08:00
|
|
|
spi_nor_debugfs_init(nor, info);
|
|
|
|
|
2014-02-24 18:37:37 +08:00
|
|
|
mutex_init(&nor->lock);
|
|
|
|
|
2019-08-24 13:27:02 +08:00
|
|
|
/* Init flash parameters based on flash_info struct and SFDP */
|
2020-03-14 03:42:53 +08:00
|
|
|
ret = spi_nor_init_params(nor);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-04-26 04:08:46 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Configure the SPI memory:
|
|
|
|
* - select op codes for (Fast) Read, Page Program and Sector Erase.
|
|
|
|
* - set the number of dummy cycles (mode cycles + wait states).
|
|
|
|
* - set the SPI protocols for register and memory accesses.
|
|
|
|
*/
|
2019-08-23 23:53:37 +08:00
|
|
|
ret = spi_nor_setup(nor, hwcaps);
|
2017-04-26 04:08:46 +08:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2014-02-24 18:37:37 +08:00
|
|
|
|
2019-08-24 20:19:22 +08:00
|
|
|
ret = spi_nor_set_addr_width(nor);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2015-11-11 04:15:27 +08:00
|
|
|
|
2017-08-23 04:45:21 +08:00
|
|
|
/* Send all the required SPI flash commands to initialize device */
|
|
|
|
ret = spi_nor_init(nor);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2021-12-07 22:02:43 +08:00
|
|
|
/* No mtd_info fields should be used up to this point. */
|
|
|
|
spi_nor_set_mtd_info(nor);
|
mtd: spi-nor: add OTP support
SPI flashes sometimes have a special OTP area, which can (and is) used to
store immutable properties like board serial number or vendor assigned
network hardware addresses.
The MTD subsystem already supports accessing such areas and some (non
SPI NOR) flashes already implement support for it. It differentiates
between user and factory areas. User areas can be written by the user and
factory ones are pre-programmed and locked down by the vendor, usually
containing an "electrical serial number". This patch will only add support
for the user areas.
Lay the foundation and implement the MTD callbacks for the SPI NOR and add
necessary parameters to the flash_info structure. If a flash supports OTP
it can be added by the convenience macro OTP_INFO(). Sometimes there are
individual regions, which might have individual offsets. Therefore, it is
possible to specify the starting address of the first region as well as
the distance between two regions (e.g. Winbond devices use this method); a
worked example follows this message.
Additionally, the regions might be locked down. Once locked, no further
write access is possible.
For SPI NOR flashes the OTP area is accessed like normal memory, e.g.
by offset addressing, except that you either have to use special read/write
commands (Winbond) or enter (and exit) a specific OTP mode
(Macronix, Micron).
Thus we introduce four operations to which the MTD callbacks will be
mapped: .read(), .write(), .lock() and .is_locked(). The read and the write
ops will be given an address offset to operate on while the locking ops use
regions because locking always affects a whole region. It is up to the
flash driver to implement these ops.
Signed-off-by: Michael Walle <michael@walle.cc>
[ta: use div64_u64(), IS_ALIGNED, params->otp.org. unsigned int region,
drop comment, add rlen local variable in spi_nor_mtd_otp_lock()]
Signed-off-by: Tudor Ambarus <tudor.ambarus@microchip.com>
Link: https://lore.kernel.org/r/20210321235140.8308-2-michael@walle.cc
2021-03-22 07:51:38 +08:00
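Since the message above describes the OTP geometry as a first-region offset plus a fixed distance between regions, a small worked example may help. Everything below is illustrative: the structure and helper names are made up and are not the driver's OTP parameters, only the same layout idea restated as self-contained code.

/*
 * Illustrative sketch of a "base + stride" OTP layout (the Winbond-style
 * scheme described above). Names and numbers are made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

struct otp_layout {
        uint32_t base;          /* offset of the first OTP region */
        uint32_t stride;        /* distance between consecutive regions */
        uint32_t len;           /* usable length of one region */
        uint32_t n_regions;     /* number of regions */
};

static uint64_t otp_region_offset(const struct otp_layout *l, unsigned int region)
{
        return (uint64_t)l->base + (uint64_t)region * l->stride;
}

int main(void)
{
        /* e.g. three 256-byte regions starting at 0x1000, spaced 0x1000 apart */
        struct otp_layout l = {
                .base = 0x1000, .stride = 0x1000, .len = 256, .n_regions = 3,
        };
        unsigned int i;

        for (i = 0; i < l.n_regions; i++)
                printf("region %u: offset 0x%llx, %u bytes\n", i,
                       (unsigned long long)otp_region_offset(&l, i), l.len);
        return 0;
}

Locking then naturally operates on whole region indices, while read and write operate on byte offsets, matching the split of ops described above.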
|
|
|
|
2015-08-11 03:39:03 +08:00
|
|
|
dev_info(dev, "%s (%lld Kbytes)\n", info->name,
|
2014-02-24 18:37:37 +08:00
|
|
|
(long long)mtd->size >> 10);
|
|
|
|
|
|
|
|
dev_dbg(dev,
|
|
|
|
"mtd .name = %s, .size = 0x%llx (%lldMiB), "
|
|
|
|
".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
|
|
|
|
mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
|
|
|
|
mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
|
|
|
|
|
|
|
|
if (mtd->numeraseregions)
|
|
|
|
for (i = 0; i < mtd->numeraseregions; i++)
|
|
|
|
dev_dbg(dev,
|
|
|
|
"mtd.eraseregions[%d] = { .offset = 0x%llx, "
|
|
|
|
".erasesize = 0x%.8x (%uKiB), "
|
|
|
|
".numblocks = %d }\n",
|
|
|
|
i, (long long)mtd->eraseregions[i].offset,
|
|
|
|
mtd->eraseregions[i].erasesize,
|
|
|
|
mtd->eraseregions[i].erasesize / 1024,
|
|
|
|
mtd->eraseregions[i].numblocks);
|
|
|
|
return 0;
|
|
|
|
}
|
2014-04-09 09:22:57 +08:00
|
|
|
EXPORT_SYMBOL_GPL(spi_nor_scan);
|
2014-02-24 18:37:37 +08:00
|
|
|
|
2020-02-19 05:24:10 +08:00
|
|
|
static int spi_nor_create_read_dirmap(struct spi_nor *nor)
|
|
|
|
{
|
|
|
|
struct spi_mem_dirmap_info info = {
|
2020-10-05 23:31:26 +08:00
|
|
|
.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
|
|
|
|
SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
|
|
|
|
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
|
|
|
|
SPI_MEM_OP_DATA_IN(0, NULL, 0)),
|
2020-02-19 05:24:10 +08:00
|
|
|
.offset = 0,
|
2021-12-07 22:02:42 +08:00
|
|
|
.length = nor->params->size,
|
2020-02-19 05:24:10 +08:00
|
|
|
};
|
|
|
|
struct spi_mem_op *op = &info.op_tmpl;
|
|
|
|
|
2020-10-05 23:31:26 +08:00
|
|
|
spi_nor_spimem_setup_op(nor, op, nor->read_proto);
|
2020-02-19 05:24:10 +08:00
|
|
|
|
|
|
|
/* convert the dummy cycles to the number of bytes */
|
|
|
|
op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
|
2020-10-05 23:31:26 +08:00
|
|
|
if (spi_nor_protocol_is_dtr(nor->read_proto))
|
|
|
|
op->dummy.nbytes *= 2;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Since spi_nor_spimem_setup_op() only sets buswidth when the number
|
|
|
|
* of data bytes is non-zero, the data buswidth won't be set here. So,
|
|
|
|
* do it explicitly.
|
|
|
|
*/
|
|
|
|
op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
|
2020-02-19 05:24:10 +08:00
|
|
|
|
|
|
|
nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
|
|
|
|
&info);
|
|
|
|
return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int spi_nor_create_write_dirmap(struct spi_nor *nor)
|
|
|
|
{
|
|
|
|
struct spi_mem_dirmap_info info = {
|
2020-10-05 23:31:26 +08:00
|
|
|
.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
|
|
|
|
SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
|
2020-02-19 05:24:10 +08:00
|
|
|
SPI_MEM_OP_NO_DUMMY,
|
2020-10-05 23:31:26 +08:00
|
|
|
SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
|
2020-02-19 05:24:10 +08:00
|
|
|
.offset = 0,
|
2021-12-07 22:02:42 +08:00
|
|
|
.length = nor->params->size,
|
2020-02-19 05:24:10 +08:00
|
|
|
};
|
|
|
|
struct spi_mem_op *op = &info.op_tmpl;
|
|
|
|
|
|
|
|
if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
|
|
|
|
op->addr.nbytes = 0;
|
|
|
|
|
2020-10-05 23:31:26 +08:00
|
|
|
spi_nor_spimem_setup_op(nor, op, nor->write_proto);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Since spi_nor_spimem_setup_op() only sets buswidth when the number
|
|
|
|
* of data bytes is non-zero, the data buswidth won't be set here. So,
|
|
|
|
* do it explicitly.
|
|
|
|
*/
|
|
|
|
op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
|
|
|
|
|
2020-02-19 05:24:10 +08:00
|
|
|
nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
|
|
|
|
&info);
|
|
|
|
return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
|
|
|
|
}
|
|
|
|
|
2019-08-06 13:10:40 +08:00
|
|
|
static int spi_nor_probe(struct spi_mem *spimem)
|
|
|
|
{
|
|
|
|
struct spi_device *spi = spimem->spi;
|
|
|
|
struct flash_platform_data *data = dev_get_platdata(&spi->dev);
|
|
|
|
struct spi_nor *nor;
|
2019-08-06 13:10:41 +08:00
|
|
|
/*
|
|
|
|
* Enable all caps by default. The core will mask them after
|
|
|
|
* checking what's really supported using spi_mem_supports_op().
|
|
|
|
*/
|
|
|
|
const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
|
2019-08-06 13:10:40 +08:00
|
|
|
char *flash_name;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
|
|
|
|
if (!nor)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
nor->spimem = spimem;
|
|
|
|
nor->dev = &spi->dev;
|
|
|
|
spi_nor_set_flash_node(nor, spi->dev.of_node);
|
|
|
|
|
|
|
|
spi_mem_set_drvdata(spimem, nor);
|
|
|
|
|
|
|
|
if (data && data->name)
|
|
|
|
nor->mtd.name = data->name;
|
|
|
|
|
|
|
|
if (!nor->mtd.name)
|
|
|
|
nor->mtd.name = spi_mem_get_name(spimem);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For some (historical?) reason many platforms provide two different
|
|
|
|
* names in flash_platform_data: "name" and "type". Quite often name is
|
|
|
|
* set to "m25p80" and then "type" provides a real chip name.
|
|
|
|
* If that's the case, respect "type" and ignore "name".
|
|
|
|
*/
|
|
|
|
if (data && data->type)
|
|
|
|
flash_name = data->type;
|
|
|
|
else if (!strcmp(spi->modalias, "spi-nor"))
|
|
|
|
flash_name = NULL; /* auto-detect */
|
|
|
|
else
|
|
|
|
flash_name = spi->modalias;
|
|
|
|
|
|
|
|
ret = spi_nor_scan(nor, flash_name, &hwcaps);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* None of the existing parts have > 512B pages, but let's play safe
|
|
|
|
* and add this logic so that if anyone ever adds support for such
|
|
|
|
* a NOR we don't end up with buffer overflows.
|
|
|
|
*/
|
2021-10-30 01:26:12 +08:00
|
|
|
if (nor->params->page_size > PAGE_SIZE) {
|
|
|
|
nor->bouncebuf_size = nor->params->page_size;
|
2019-08-06 13:10:40 +08:00
|
|
|
devm_kfree(nor->dev, nor->bouncebuf);
|
|
|
|
nor->bouncebuf = devm_kmalloc(nor->dev,
|
|
|
|
nor->bouncebuf_size,
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!nor->bouncebuf)
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2020-02-19 05:24:10 +08:00
|
|
|
ret = spi_nor_create_read_dirmap(nor);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ret = spi_nor_create_write_dirmap(nor);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2019-08-06 13:10:40 +08:00
|
|
|
return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
|
|
|
|
data ? data->nr_parts : 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int spi_nor_remove(struct spi_mem *spimem)
|
|
|
|
{
|
|
|
|
struct spi_nor *nor = spi_mem_get_drvdata(spimem);
|
|
|
|
|
|
|
|
spi_nor_restore(nor);
|
|
|
|
|
|
|
|
/* Clean up MTD stuff. */
|
|
|
|
return mtd_device_unregister(&nor->mtd);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void spi_nor_shutdown(struct spi_mem *spimem)
|
|
|
|
{
|
|
|
|
struct spi_nor *nor = spi_mem_get_drvdata(spimem);
|
|
|
|
|
|
|
|
spi_nor_restore(nor);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do NOT add to this array without reading the following:
|
|
|
|
*
|
|
|
|
* Historically, many flash devices are bound to this driver by their name. But
|
|
|
|
* since most of these flashes are compatible to some extent, and their
|
|
|
|
* differences can often be distinguished by the JEDEC read-ID command, we
|
|
|
|
* encourage new users to add support to the spi-nor library, and simply bind
|
|
|
|
* against a generic string here (e.g., "jedec,spi-nor").
|
|
|
|
*
|
|
|
|
* Many flash names are kept here in this list (as well as in spi-nor.c) to
|
|
|
|
* keep them available as module aliases for existing platforms.
|
|
|
|
*/
|
|
|
|
static const struct spi_device_id spi_nor_dev_ids[] = {
|
|
|
|
/*
|
|
|
|
* Allow non-DT platform devices to bind to the "spi-nor" modalias, and
|
|
|
|
* hack around the fact that the SPI core does not provide uevent
|
|
|
|
* matching for .of_match_table
|
|
|
|
*/
|
|
|
|
{"spi-nor"},
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Entries not used in DTs that should be safe to drop after replacing
|
|
|
|
* them with "spi-nor" in platform data.
|
|
|
|
*/
|
|
|
|
{"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Entries that were used in DTs without "jedec,spi-nor" fallback and
|
|
|
|
* should be kept for backward compatibility.
|
|
|
|
*/
|
|
|
|
{"at25df321a"}, {"at25df641"}, {"at26df081a"},
|
|
|
|
{"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
|
|
|
|
{"mx25l25635e"},{"mx66l51235l"},
|
|
|
|
{"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
|
|
|
|
{"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"},
|
|
|
|
{"s25fl064k"},
|
|
|
|
{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
|
|
|
|
{"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"},
|
|
|
|
{"m25p64"}, {"m25p128"},
|
|
|
|
{"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
|
|
|
|
{"w25q80bl"}, {"w25q128"}, {"w25q256"},
|
|
|
|
|
|
|
|
/* Flashes that can't be detected using JEDEC */
|
|
|
|
{"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
|
|
|
|
{"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
|
|
|
|
{"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
|
|
|
|
|
|
|
|
/* Everspin MRAMs (non-JEDEC) */
|
|
|
|
{ "mr25h128" }, /* 128 Kib, 40 MHz */
|
|
|
|
{ "mr25h256" }, /* 256 Kib, 40 MHz */
|
|
|
|
{ "mr25h10" }, /* 1 Mib, 40 MHz */
|
|
|
|
{ "mr25h40" }, /* 4 Mib, 40 MHz */
|
|
|
|
|
|
|
|
{ },
|
|
|
|
};
|
|
|
|
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
|
|
|
|
|
|
|
|
static const struct of_device_id spi_nor_of_table[] = {
|
|
|
|
/*
|
|
|
|
* Generic compatibility for SPI NOR that can be identified by the
|
|
|
|
* JEDEC READ ID opcode (0x9F). Use this, if possible.
|
|
|
|
*/
|
|
|
|
{ .compatible = "jedec,spi-nor" },
|
|
|
|
{ /* sentinel */ },
|
|
|
|
};
|
|
|
|
MODULE_DEVICE_TABLE(of, spi_nor_of_table);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* REVISIT: many of these chips have deep power-down modes, which
|
|
|
|
* should clearly be entered on suspend() to minimize power use.
|
|
|
|
* And also when they're otherwise idle...
|
|
|
|
*/
|
|
|
|
static struct spi_mem_driver spi_nor_driver = {
|
|
|
|
.spidrv = {
|
|
|
|
.driver = {
|
|
|
|
.name = "spi-nor",
|
|
|
|
.of_match_table = spi_nor_of_table,
|
2021-05-03 23:56:51 +08:00
|
|
|
.dev_groups = spi_nor_sysfs_groups,
|
2019-08-06 13:10:40 +08:00
|
|
|
},
|
|
|
|
.id_table = spi_nor_dev_ids,
|
|
|
|
},
|
|
|
|
.probe = spi_nor_probe,
|
|
|
|
.remove = spi_nor_remove,
|
|
|
|
.shutdown = spi_nor_shutdown,
|
|
|
|
};
|
|
|
|
module_spi_mem_driver(spi_nor_driver);
|
|
|
|
|
2018-12-06 18:41:20 +08:00
|
|
|
MODULE_LICENSE("GPL v2");
|
2014-02-24 18:37:37 +08:00
|
|
|
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
|
|
|
|
MODULE_AUTHOR("Mike Lavender");
|
|
|
|
MODULE_DESCRIPTION("framework for SPI NOR");
|