mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-03 17:14:14 +08:00)
lightnvm: move block fold outside of get_bb_tbl()
The get block table command returns a list of blocks and planes with their associated state. Users, such as gennvm and sysblk, manage all planes as a single virtual block, so it was natural to fold the bad block list before returning it. However, to let users that manage blocks on a per-plane level also use the interface, get_bb_tbl is changed to not fold by default and instead let the caller fold if necessary.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
parent 4891d120b9
commit 22e8c9766a
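To make the fold semantics concrete before reading the diff, here is a small userspace sketch (not part of the patch): the geometry and table contents are invented, and the NVM_BLK_T_* values mirror the block-type flags defined in include/linux/lightnvm.h.

/* Standalone illustration of the fold rule (userspace, not kernel code). */
#include <stdio.h>
#include <stdint.h>

#define NVM_BLK_T_FREE		0x0
#define NVM_BLK_T_BAD		0x1
#define NVM_BLK_T_GRWN_BAD	0x2
#define NVM_BLK_T_HOST		0x8

/* Same logic as nvm_bb_tbl_fold(), with the geometry passed explicitly. */
static int bb_tbl_fold(uint8_t *blks, int blks_per_lun, int plane_mode)
{
	int blk, offset, pl, blktype;

	for (blk = 0; blk < blks_per_lun; blk++) {
		offset = blk * plane_mode;
		blktype = blks[offset];

		/* a bad plane marks the whole virtual block bad */
		for (pl = 0; pl < plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD | NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}
		blks[blk] = blktype;
	}
	return blks_per_lun;
}

int main(void)
{
	/* dual-plane LUN, 3 virtual blocks -> 6 plane-block states */
	uint8_t blks[6] = {
		NVM_BLK_T_FREE, NVM_BLK_T_GRWN_BAD,	/* blk 0: plane 1 bad */
		NVM_BLK_T_HOST, NVM_BLK_T_HOST,		/* blk 1: host-owned */
		NVM_BLK_T_FREE, NVM_BLK_T_FREE,		/* blk 2: free */
	};
	int i, n = bb_tbl_fold(blks, 3, 2);

	for (i = 0; i < n; i++)
		printf("blk %d: 0x%x\n", i, blks[i]);
	/* prints: blk 0: 0x2, blk 1: 0x8, blk 2: 0x0 */
	return 0;
}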
@@ -420,6 +420,41 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
 }
 EXPORT_SYMBOL(nvm_submit_ppa);
 
+/*
+ * folds a bad block list from its plane representation to its virtual
+ * block representation. The fold is done in place and reduced size is
+ * returned.
+ *
+ * If any of the planes status are bad or grown bad block, the virtual block
+ * is marked bad. If not bad, the first plane state acts as the block state.
+ */
+int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
+{
+	int blk, offset, pl, blktype;
+
+	if (nr_blks != dev->blks_per_lun * dev->plane_mode)
+		return -EINVAL;
+
+	for (blk = 0; blk < dev->blks_per_lun; blk++) {
+		offset = blk * dev->plane_mode;
+		blktype = blks[offset];
+
+		/* Bad blocks on any planes take precedence over other types */
+		for (pl = 0; pl < dev->plane_mode; pl++) {
+			if (blks[offset + pl] &
+					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
+				blktype = blks[offset + pl];
+				break;
+			}
+		}
+
+		blks[blk] = blktype;
+	}
+
+	return dev->blks_per_lun;
+}
+EXPORT_SYMBOL(nvm_bb_tbl_fold);
+
 static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
 {
 	int i;
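With the fold exported from core, every get_bb_tbl() callback becomes responsible for folding before it interprets the table. A minimal sketch of a callback under the new nvm_bb_update_fn signature (the function name and the bad-block counting are illustrative, not from this patch); the gennvm and sysblk hunks below follow this same pattern:

/* Sketch of an nvm_bb_update_fn under the new contract: fold first,
 * then index the table per virtual block. */
static int example_update_bbtbl(struct nvm_dev *dev, struct ppa_addr ppa,
				u8 *blks, int nr_blks, void *private)
{
	int i, nr_bad = 0;

	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
	if (nr_blks < 0)
		return nr_blks;	/* table size did not match geometry */

	for (i = 0; i < nr_blks; i++)
		if (blks[i] & (NVM_BLK_T_BAD | NVM_BLK_T_GRWN_BAD))
			nr_bad++;

	pr_debug("nvm: ch%u lun%u: %d bad blocks\n",
					ppa.g.ch, ppa.g.lun, nr_bad);
	return 0;
}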
@@ -129,18 +129,21 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
 	return 0;
 }
 
-static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
-								void *private)
+static int gennvm_block_bb(struct nvm_dev *dev, struct ppa_addr ppa,
+					u8 *blks, int nr_blks, void *private)
 {
 	struct gen_nvm *gn = private;
-	struct nvm_dev *dev = gn->dev;
 	struct gen_lun *lun;
 	struct nvm_block *blk;
 	int i;
 
+	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+	if (nr_blks < 0)
+		return nr_blks;
+
 	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
 
-	for (i = 0; i < nr_blocks; i++) {
+	for (i = 0; i < nr_blks; i++) {
 		if (blks[i] == 0)
 			continue;
 
@@ -250,8 +253,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
 		ppa = generic_to_dev_addr(dev, ppa);
 
 		ret = dev->ops->get_bb_tbl(dev, ppa,
-					dev->blks_per_lun,
-					gennvm_block_bb, gn);
+					gennvm_block_bb, gn);
 		if (ret)
 			pr_err("gennvm: could not read BB table\n");
 	}
@@ -93,12 +93,16 @@ void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
 	s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
 }
 
-static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
-								void *private)
+static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+					u8 *blks, int nr_blks, void *private)
 {
 	struct sysblk_scan *s = private;
 	int i, nr_sysblk = 0;
 
+	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+	if (nr_blks < 0)
+		return nr_blks;
+
 	for (i = 0; i < nr_blks; i++) {
 		if (blks[i] != NVM_BLK_T_HOST)
 			continue;
@@ -130,7 +134,7 @@ static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
 		dppa = generic_to_dev_addr(dev, ppas[i]);
 		s->row = i;
 
-		ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s);
+		ret = dev->ops->get_bb_tbl(dev, dppa, fn, s);
 		if (ret) {
 			pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
 							ppas[i].g.ch,
@@ -235,13 +239,17 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
 	return 0;
 }
 
-static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
-								void *private)
+static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+					u8 *blks, int nr_blks, void *private)
 {
 	struct sysblk_scan *s = private;
 	struct ppa_addr *sppa;
 	int i, blkid = 0;
 
+	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+	if (nr_blks < 0)
+		return nr_blks;
+
 	for (i = 0; i < nr_blks; i++) {
 		if (blks[i] == NVM_BLK_T_HOST)
 			return -EEXIST;
@@ -578,13 +586,16 @@ static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
 								BITS_PER_LONG;
 }
 
-static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
-								void *private)
+static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+					u8 *blks, int nr_blks, void *private)
 {
 	struct factory_blks *f = private;
-	struct nvm_dev *dev = f->dev;
 	int i, lunoff;
 
+	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+	if (nr_blks < 0)
+		return nr_blks;
+
 	lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);
 
 	/* non-set bits correspond to the block must be erased */
@@ -661,7 +672,7 @@ static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
 
 	dev_ppa = generic_to_dev_addr(dev, ppa);
 
-	ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv);
+	ret = dev->ops->get_bb_tbl(dev, dev_ppa, fn, priv);
 	if (ret)
 		pr_err("nvm: failed bb tbl for ch%u lun%u\n",
 							ppa.g.ch, ppa.g.blk);
@@ -387,41 +387,16 @@ out:
 	return ret;
 }
 
-static void nvme_nvm_bb_tbl_fold(struct nvm_dev *nvmdev,
-						int nr_dst_blks, u8 *dst_blks,
-						int nr_src_blks, u8 *src_blks)
-{
-	int blk, offset, pl, blktype;
-
-	for (blk = 0; blk < nr_dst_blks; blk++) {
-		offset = blk * nvmdev->plane_mode;
-		blktype = src_blks[offset];
-
-		/* Bad blocks on any planes take precedence over other types */
-		for (pl = 0; pl < nvmdev->plane_mode; pl++) {
-			if (src_blks[offset + pl] &
-					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
-				blktype = src_blks[offset + pl];
-				break;
-			}
-		}
-
-		dst_blks[blk] = blktype;
-	}
-}
-
 static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
-				int nr_dst_blks, nvm_bb_update_fn *update_bbtbl,
-				void *priv)
+				nvm_bb_update_fn *update_bbtbl, void *priv)
 {
 	struct request_queue *q = nvmdev->q;
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	struct nvme_nvm_command c = {};
 	struct nvme_nvm_bb_tbl *bb_tbl;
-	u8 *dst_blks = NULL;
-	int nr_src_blks = nr_dst_blks * nvmdev->plane_mode;
-	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_src_blks;
+	int nr_blks = nvmdev->blks_per_lun * nvmdev->plane_mode;
+	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
 	int ret = 0;
 
 	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
@@ -432,12 +407,6 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 	if (!bb_tbl)
 		return -ENOMEM;
 
-	dst_blks = kzalloc(nr_dst_blks, GFP_KERNEL);
-	if (!dst_blks) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
 								bb_tbl, tblsz);
 	if (ret) {
@@ -459,21 +428,17 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 		goto out;
 	}
 
-	if (le32_to_cpu(bb_tbl->tblks) != nr_src_blks) {
+	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
 		ret = -EINVAL;
 		dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
-				le32_to_cpu(bb_tbl->tblks), nr_src_blks);
+				le32_to_cpu(bb_tbl->tblks), nr_blks);
 		goto out;
 	}
 
-	nvme_nvm_bb_tbl_fold(nvmdev, nr_dst_blks, dst_blks,
-					nr_src_blks, bb_tbl->blk);
-
 	ppa = dev_to_generic_addr(nvmdev, ppa);
-	ret = update_bbtbl(ppa, nr_dst_blks, dst_blks, priv);
+	ret = update_bbtbl(nvmdev, ppa, bb_tbl->blk, nr_blks, priv);
 
 out:
-	kfree(dst_blks);
 	kfree(bb_tbl);
 	return ret;
 }
@@ -41,11 +41,12 @@ struct nvm_id;
 struct nvm_dev;
 
 typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
+typedef int (nvm_bb_update_fn)(struct nvm_dev *, struct ppa_addr, u8 *, int,
+								void *);
 typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
 typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
 				nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
+typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr,
 				nvm_bb_update_fn *, void *);
 typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
 typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
@@ -538,6 +539,7 @@ extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
 							void *, int);
 extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
 							int, void *, int);
+extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
 
 /* sysblk.c */
 #define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */