Merge branch 'for-5.18/drivers' into for-5.18/write-streams

* for-5.18/drivers: (51 commits)
  bcache: fixup multiple threads crash
  bcache: fixup bcache_dev_sectors_dirty_add() multithreaded CPU false sharing
  floppy: use memcpy_{to,from}_bvec
  drbd: use bvec_kmap_local in recv_dless_read
  drbd: use bvec_kmap_local in drbd_csum_bio
  bcache: use bvec_kmap_local in bio_csum
  nvdimm-btt: use bvec_kmap_local in btt_rw_integrity
  nvdimm-blk: use bvec_kmap_local in nd_blk_rw_integrity
  zram: use memcpy_from_bvec in zram_bvec_write
  zram: use memcpy_to_bvec in zram_bvec_read
  aoe: use bvec_kmap_local in bvcpy
  iss-simdisk: use bvec_kmap_local in simdisk_submit_bio
  nvme: check that EUI/GUID/UUID are globally unique
  nvme: check for duplicate identifiers earlier
  nvme: fix the check for duplicate unique identifiers
  nvme: cleanup __nvme_check_ids
  nvme: remove nssa from struct nvme_ctrl
  nvme: explicitly set non-error for directives
  nvme: expose cntrltype and dctype through sysfs
  nvme: send uevent on connection up
  ...
Jens Axboe  2022-03-07 12:44:39 -07:00
commit b46bebaf2a
38 changed files with 635 additions and 256 deletions

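Most of the driver-side churn in this merge is the same mechanical conversion: open-coded kmap()/kmap_atomic() on bvec pages is replaced by the bvec helpers, which map only the segment being touched and pair with kunmap_local(). A minimal sketch of the before/after shape (the consume() callee and variable names are illustrative, not taken from any one driver):

```c
/* Before: map the whole page, then index by the segment offset. */
char *buf = kmap_atomic(bvec.bv_page) + bvec.bv_offset;
consume(buf, bvec.bv_len);
kunmap_atomic(buf);

/* After: bvec_kmap_local() hides the page/offset arithmetic. */
char *p = bvec_kmap_local(&bvec);
consume(p, bvec.bv_len);
kunmap_local(p);

/* Plain copies can use the bvec memcpy helpers instead (see zram and floppy below). */
memcpy_from_bvec(linear_buf, &bvec);	/* bvec -> linear buffer */
memcpy_to_bvec(&bvec, linear_buf);	/* linear buffer -> bvec */
```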
@@ -108,13 +108,13 @@ static void simdisk_submit_bio(struct bio *bio)
 	sector_t sector = bio->bi_iter.bi_sector;
 	bio_for_each_segment(bvec, bio, iter) {
-		char *buffer = kmap_atomic(bvec.bv_page) + bvec.bv_offset;
+		char *buffer = bvec_kmap_local(&bvec);
 		unsigned len = bvec.bv_len >> SECTOR_SHIFT;
 		simdisk_transfer(dev, sector, len, buffer,
 				 bio_data_dir(bio) == WRITE);
 		sector += len;
-		kunmap_atomic(buffer);
+		kunmap_local(buffer);
 	}
 	bio_endio(bio);


@@ -1018,9 +1018,9 @@ bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
 	iter.bi_size = cnt;
 	__bio_for_each_segment(bv, bio, iter, iter) {
-		char *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
+		char *p = bvec_kmap_local(&bv);
 		skb_copy_bits(skb, soff, p, bv.bv_len);
-		kunmap_atomic(p);
+		kunmap_local(p);
 		soff += bv.bv_len;
 	}
 }


@@ -2017,10 +2017,10 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
 	D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
 	bio_for_each_segment(bvec, bio, iter) {
-		void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
+		void *mapped = bvec_kmap_local(&bvec);
 		expect = min_t(int, data_size, bvec.bv_len);
 		err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
-		kunmap(bvec.bv_page);
+		kunmap_local(mapped);
 		if (err)
 			return err;
 		data_size -= expect;


@@ -326,9 +326,9 @@ void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
 	bio_for_each_segment(bvec, bio, iter) {
 		u8 *src;
-		src = kmap_atomic(bvec.bv_page);
-		crypto_shash_update(desc, src + bvec.bv_offset, bvec.bv_len);
-		kunmap_atomic(src);
+		src = bvec_kmap_local(&bvec);
+		crypto_shash_update(desc, src, bvec.bv_len);
+		kunmap_local(src);
 		/* REQ_OP_WRITE_SAME has only one segment,
 		 * checksum the payload only once. */


@@ -2485,11 +2485,9 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 	}
 	if (CT(raw_cmd->cmd[COMMAND]) == FD_READ)
-		memcpy_to_page(bv.bv_page, bv.bv_offset, dma_buffer,
-			       size);
+		memcpy_to_bvec(&bv, dma_buffer);
 	else
-		memcpy_from_page(dma_buffer, bv.bv_page, bv.bv_offset,
-				 size);
+		memcpy_from_bvec(dma_buffer, &bv);
 	remaining -= size;
 	dma_buffer += size;


@ -86,6 +86,7 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ) #define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ)
#define LOOP_DEFAULT_HW_Q_DEPTH (128)
static DEFINE_IDR(loop_index_idr); static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex); static DEFINE_MUTEX(loop_ctl_mutex);
@ -309,12 +310,11 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
* a.k.a. discard/zerorange. * a.k.a. discard/zerorange.
*/ */
struct file *file = lo->lo_backing_file; struct file *file = lo->lo_backing_file;
struct request_queue *q = lo->lo_queue;
int ret; int ret;
mode |= FALLOC_FL_KEEP_SIZE; mode |= FALLOC_FL_KEEP_SIZE;
if (!blk_queue_discard(q)) { if (!blk_queue_discard(lo->lo_queue)) {
ret = -EOPNOTSUPP; ret = -EOPNOTSUPP;
goto out; goto out;
} }
@ -328,8 +328,7 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
static int lo_req_flush(struct loop_device *lo, struct request *rq) static int lo_req_flush(struct loop_device *lo, struct request *rq)
{ {
struct file *file = lo->lo_backing_file; int ret = vfs_fsync(lo->lo_backing_file, 0);
int ret = vfs_fsync(file, 0);
if (unlikely(ret && ret != -EINVAL)) if (unlikely(ret && ret != -EINVAL))
ret = -EIO; ret = -EIO;
@ -681,33 +680,33 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{ {
return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset); return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset);
} }
static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{ {
return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
} }
static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
{ {
int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
return sprintf(buf, "%s\n", autoclear ? "1" : "0"); return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0");
} }
static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
{ {
int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
return sprintf(buf, "%s\n", partscan ? "1" : "0"); return sysfs_emit(buf, "%s\n", partscan ? "1" : "0");
} }
static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf) static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{ {
int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO); int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);
return sprintf(buf, "%s\n", dio ? "1" : "0"); return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
} }
LOOP_ATTR_RO(backing_file); LOOP_ATTR_RO(backing_file);
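The sprintf()-to-sysfs_emit() swaps above follow the now-standard pattern for sysfs ->show() callbacks: sysfs_emit() verifies it is writing into the page-sized buffer sysfs handed in and clamps its output to PAGE_SIZE, so a malformed attribute cannot overrun the buffer.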
@ -1261,7 +1260,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) { if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
/* If any pages were dirtied after invalidate_bdev(), try again */ /* If any pages were dirtied after invalidate_bdev(), try again */
err = -EAGAIN; err = -EAGAIN;
pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name, __func__, lo->lo_number, lo->lo_file_name,
lo->lo_device->bd_inode->i_mapping->nrpages); lo->lo_device->bd_inode->i_mapping->nrpages);
goto out_unfreeze; goto out_unfreeze;
@ -1481,7 +1480,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
/* invalidate_bdev should have truncated all the pages */ /* invalidate_bdev should have truncated all the pages */
if (lo->lo_device->bd_inode->i_mapping->nrpages) { if (lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN; err = -EAGAIN;
pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name, __func__, lo->lo_number, lo->lo_file_name,
lo->lo_device->bd_inode->i_mapping->nrpages); lo->lo_device->bd_inode->i_mapping->nrpages);
goto out_unfreeze; goto out_unfreeze;
@ -1786,6 +1785,24 @@ module_param(max_loop, int, 0444);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, 0444); module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
static int hw_queue_depth = LOOP_DEFAULT_HW_Q_DEPTH;
static int loop_set_hw_queue_depth(const char *s, const struct kernel_param *p)
{
int ret = kstrtoint(s, 10, &hw_queue_depth);
return (ret || (hw_queue_depth < 1)) ? -EINVAL : 0;
}
static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
.set = loop_set_hw_queue_depth,
.get = param_get_int,
};
device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 128");
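With the 0444 permission the new parameter is read-only at runtime (visible under /sys/module/loop/parameters/), so the depth is chosen at load time, e.g. `modprobe loop hw_queue_depth=256` when loop is built as a module. loop_set_hw_queue_depth() above rejects values below 1, and loop_add() below applies the value to each device's blk-mq tag set.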
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
@ -1980,7 +1997,7 @@ static int loop_add(int i)
lo->tag_set.ops = &loop_mq_ops; lo->tag_set.ops = &loop_mq_ops;
lo->tag_set.nr_hw_queues = 1; lo->tag_set.nr_hw_queues = 1;
lo->tag_set.queue_depth = 128; lo->tag_set.queue_depth = hw_queue_depth;
lo->tag_set.numa_node = NUMA_NO_NODE; lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd); lo->tag_set.cmd_size = sizeof(struct loop_cmd);
lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING | lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |


@ -431,9 +431,10 @@ static ssize_t nullb_device_power_store(struct config_item *item,
if (!dev->power && newp) { if (!dev->power && newp) {
if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags)) if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
return count; return count;
if (null_add_dev(dev)) { ret = null_add_dev(dev);
if (ret) {
clear_bit(NULLB_DEV_FL_UP, &dev->flags); clear_bit(NULLB_DEV_FL_UP, &dev->flags);
return -ENOMEM; return ret;
} }
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
@ -719,26 +720,25 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
return NULL; return NULL;
} }
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait) static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, struct bio *bio)
{ {
struct nullb_cmd *cmd; struct nullb_cmd *cmd;
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
cmd = __alloc_cmd(nq);
if (cmd || !can_wait)
return cmd;
do { do {
prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE); /*
* This avoids multiple return statements, multiple calls to
* __alloc_cmd() and a fast path call to prepare_to_wait().
*/
cmd = __alloc_cmd(nq); cmd = __alloc_cmd(nq);
if (cmd) if (cmd) {
break; cmd->bio = bio;
return cmd;
}
prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
io_schedule(); io_schedule();
finish_wait(&nq->wait, &wait);
} while (1); } while (1);
finish_wait(&nq->wait, &wait);
return cmd;
} }
static void end_cmd(struct nullb_cmd *cmd) static void end_cmd(struct nullb_cmd *cmd)
@ -777,24 +777,22 @@ static void null_complete_rq(struct request *rq)
end_cmd(blk_mq_rq_to_pdu(rq)); end_cmd(blk_mq_rq_to_pdu(rq));
} }
static struct nullb_page *null_alloc_page(gfp_t gfp_flags) static struct nullb_page *null_alloc_page(void)
{ {
struct nullb_page *t_page; struct nullb_page *t_page;
t_page = kmalloc(sizeof(struct nullb_page), gfp_flags); t_page = kmalloc(sizeof(struct nullb_page), GFP_NOIO);
if (!t_page) if (!t_page)
goto out; return NULL;
t_page->page = alloc_pages(gfp_flags, 0); t_page->page = alloc_pages(GFP_NOIO, 0);
if (!t_page->page) if (!t_page->page) {
goto out_freepage; kfree(t_page);
return NULL;
}
memset(t_page->bitmap, 0, sizeof(t_page->bitmap)); memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
return t_page; return t_page;
out_freepage:
kfree(t_page);
out:
return NULL;
} }
static void null_free_page(struct nullb_page *t_page) static void null_free_page(struct nullb_page *t_page)
@ -932,7 +930,7 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
spin_unlock_irq(&nullb->lock); spin_unlock_irq(&nullb->lock);
t_page = null_alloc_page(GFP_NOIO); t_page = null_alloc_page();
if (!t_page) if (!t_page)
goto out_lock; goto out_lock;
@ -1476,12 +1474,8 @@ static void null_submit_bio(struct bio *bio)
sector_t nr_sectors = bio_sectors(bio); sector_t nr_sectors = bio_sectors(bio);
struct nullb *nullb = bio->bi_bdev->bd_disk->private_data; struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
struct nullb_queue *nq = nullb_to_queue(nullb); struct nullb_queue *nq = nullb_to_queue(nullb);
struct nullb_cmd *cmd;
cmd = alloc_cmd(nq, 1); null_handle_cmd(alloc_cmd(nq, bio), sector, nr_sectors, bio_op(bio));
cmd->bio = bio;
null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
} }
static bool should_timeout_request(struct request *rq) static bool should_timeout_request(struct request *rq)


@ -23,7 +23,6 @@ MODULE_LICENSE("GPL");
static int rnbd_client_major; static int rnbd_client_major;
static DEFINE_IDA(index_ida); static DEFINE_IDA(index_ida);
static DEFINE_MUTEX(ida_lock);
static DEFINE_MUTEX(sess_lock); static DEFINE_MUTEX(sess_lock);
static LIST_HEAD(sess_list); static LIST_HEAD(sess_list);
@ -55,9 +54,7 @@ static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
if (!refcount_dec_and_test(&dev->refcount)) if (!refcount_dec_and_test(&dev->refcount))
return; return;
mutex_lock(&ida_lock); ida_free(&index_ida, dev->clt_device_id);
ida_simple_remove(&index_ida, dev->clt_device_id);
mutex_unlock(&ida_lock);
kfree(dev->hw_queues); kfree(dev->hw_queues);
kfree(dev->pathname); kfree(dev->pathname);
rnbd_clt_put_sess(dev->sess); rnbd_clt_put_sess(dev->sess);
@ -87,7 +84,6 @@ static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
dev->discard_granularity = le32_to_cpu(rsp->discard_granularity); dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
dev->discard_alignment = le32_to_cpu(rsp->discard_alignment); dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
dev->secure_discard = le16_to_cpu(rsp->secure_discard); dev->secure_discard = le16_to_cpu(rsp->secure_discard);
dev->rotational = rsp->rotational;
dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK); dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK);
dev->fua = !!(rsp->cache_policy & RNBD_FUA); dev->fua = !!(rsp->cache_policy & RNBD_FUA);
@ -1262,9 +1258,9 @@ find_and_get_or_create_sess(const char *sessname,
struct rtrs_clt_ops rtrs_ops; struct rtrs_clt_ops rtrs_ops;
sess = find_or_create_sess(sessname, &first); sess = find_or_create_sess(sessname, &first);
if (sess == ERR_PTR(-ENOMEM)) if (sess == ERR_PTR(-ENOMEM)) {
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
else if ((nr_poll_queues && !first) || (!nr_poll_queues && sess->nr_poll_queues)) { } else if ((nr_poll_queues && !first) || (!nr_poll_queues && sess->nr_poll_queues)) {
/* /*
* A device MUST have its own session to use the polling-mode. * A device MUST have its own session to use the polling-mode.
* It must fail to map new device with the same session. * It must fail to map new device with the same session.
@ -1410,8 +1406,10 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
dev->read_only = false; dev->read_only = false;
} }
if (!dev->rotational) /*
blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue); * Network device does not need rotational
*/
blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
err = add_disk(dev->gd); err = add_disk(dev->gd);
if (err) if (err)
blk_cleanup_disk(dev->gd); blk_cleanup_disk(dev->gd);
@ -1459,10 +1457,8 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
goto out_alloc; goto out_alloc;
} }
mutex_lock(&ida_lock); ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
ret = ida_simple_get(&index_ida, 0, 1 << (MINORBITS - RNBD_PART_BITS), GFP_KERNEL);
GFP_KERNEL);
mutex_unlock(&ida_lock);
if (ret < 0) { if (ret < 0) {
pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n", pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
pathname, sess->sessname, ret); pathname, sess->sessname, ret);
@ -1610,13 +1606,13 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
} }
rnbd_clt_info(dev, rnbd_clt_info(dev,
"map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n", "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
dev->gd->disk_name, dev->nsectors, dev->gd->disk_name, dev->nsectors,
dev->logical_block_size, dev->physical_block_size, dev->logical_block_size, dev->physical_block_size,
dev->max_write_same_sectors, dev->max_discard_sectors, dev->max_write_same_sectors, dev->max_discard_sectors,
dev->discard_granularity, dev->discard_alignment, dev->discard_granularity, dev->discard_alignment,
dev->secure_discard, dev->max_segments, dev->secure_discard, dev->max_segments,
dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua); dev->max_hw_sectors, dev->wc, dev->fua);
mutex_unlock(&dev->lock); mutex_unlock(&dev->lock);
rnbd_clt_put_sess(sess); rnbd_clt_put_sess(sess);
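The ida_simple_*() to ida_*() conversions in this file are repeated throughout the merge (nvme core and nvme-fc below use the same pattern). As a rough reference for the mapping — note that ida_simple_get()'s end argument was exclusive while ida_alloc_max()'s max is inclusive, so upper bounds may need adjusting when converting:

```c
/* Old-style IDA calls and their modern equivalents (sketch, not from one driver). */
id = ida_simple_get(&ida, 0, 0, GFP_KERNEL);	/* -> ida_alloc(&ida, GFP_KERNEL) */
id = ida_simple_get(&ida, 1, 0, GFP_KERNEL);	/* -> ida_alloc_min(&ida, 1, GFP_KERNEL) */
ida_simple_remove(&ida, id);			/* -> ida_free(&ida, id) */
```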


@@ -118,7 +118,6 @@ struct rnbd_clt_dev {
 	enum rnbd_access_mode access_mode;
 	u32 nr_poll_queues;
 	bool read_only;
-	bool rotational;
 	bool wc;
 	bool fua;
 	u32 max_hw_sectors;


@@ -128,7 +128,7 @@ enum rnbd_cache_policy {
  * @logical_block_size: logical block size device supports in bytes
  * @max_segments: max segments hardware support in one transfer
  * @secure_discard: supports secure discard
- * @rotation: is a rotational disc?
+ * @obsolete_rotational: obsolete, not in used.
  * @cache_policy: support write-back caching or FUA?
  */
 struct rnbd_msg_open_rsp {
@@ -144,7 +144,7 @@ struct rnbd_msg_open_rsp {
 	__le16		logical_block_size;
 	__le16		max_segments;
 	__le16		secure_discard;
-	u8		rotational;
+	u8		obsolete_rotational;
 	u8		cache_policy;
 	u8		reserved[10];
 };


@@ -558,7 +558,6 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
 		cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
 	rsp->secure_discard =
 		cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
-	rsp->rotational = !blk_queue_nonrot(q);
 	rsp->cache_policy = 0;
 	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 		rsp->cache_policy |= RNBD_WRITEBACK;


@@ -1331,12 +1331,10 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		goto out;
 	if (is_partial_io(bvec)) {
-		void *dst = kmap_atomic(bvec->bv_page);
 		void *src = kmap_atomic(page);
-		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
+		memcpy_to_bvec(bvec, src + offset);
 		kunmap_atomic(src);
-		kunmap_atomic(dst);
 	}
 out:
 	if (is_partial_io(bvec))
@@ -1467,7 +1465,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 {
 	int ret;
 	struct page *page = NULL;
-	void *src;
 	struct bio_vec vec;
 	vec = *bvec;
@@ -1485,11 +1482,9 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 		if (ret)
 			goto out;
-		src = kmap_atomic(bvec->bv_page);
 		dst = kmap_atomic(page);
-		memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
+		memcpy_from_bvec(dst + offset, bvec);
 		kunmap_atomic(dst);
-		kunmap_atomic(src);
 		vec.bv_page = page;
 		vec.bv_len = PAGE_SIZE;


@@ -2060,9 +2060,11 @@ int bch_btree_check(struct cache_set *c)
 		}
 	}
+	/*
+	 * Must wait for all threads to stop.
+	 */
 	wait_event_interruptible(check_state->wait,
-				 atomic_read(&check_state->started) == 0 ||
-				 test_bit(CACHE_SET_IO_DISABLE, &c->flags));
+				 atomic_read(&check_state->started) == 0);
 	for (i = 0; i < check_state->total_threads; i++) {
 		if (check_state->infos[i].result) {


@@ -44,10 +44,10 @@ static void bio_csum(struct bio *bio, struct bkey *k)
 	uint64_t csum = 0;
 	bio_for_each_segment(bv, bio, iter) {
-		void *d = kmap(bv.bv_page) + bv.bv_offset;
+		void *d = bvec_kmap_local(&bv);
 		csum = crc64_be(csum, d, bv.bv_len);
-		kunmap(bv.bv_page);
+		kunmap_local(d);
 	}
 	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);


@@ -585,10 +585,13 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
 		sectors_dirty = atomic_add_return(s,
 					d->stripe_sectors_dirty + stripe);
-		if (sectors_dirty == d->stripe_size)
-			set_bit(stripe, d->full_dirty_stripes);
-		else
-			clear_bit(stripe, d->full_dirty_stripes);
+		if (sectors_dirty == d->stripe_size) {
+			if (!test_bit(stripe, d->full_dirty_stripes))
+				set_bit(stripe, d->full_dirty_stripes);
+		} else {
+			if (test_bit(stripe, d->full_dirty_stripes))
+				clear_bit(stripe, d->full_dirty_stripes);
+		}
 		nr_sectors -= s;
 		stripe_offset = 0;
@@ -998,9 +1001,11 @@ void bch_sectors_dirty_init(struct bcache_device *d)
 		}
 	}
+	/*
+	 * Must wait for all threads to stop.
+	 */
 	wait_event_interruptible(state->wait,
-				 atomic_read(&state->started) == 0 ||
-				 test_bit(CACHE_SET_IO_DISABLE, &c->flags));
+				 atomic_read(&state->started) == 0);
 out:
 	kfree(state);
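The bcache_dev_sectors_dirty_add() change above is the multithreaded false-sharing fix named in the merge shortlog: writers now touch the shared full_dirty_stripes bitmap only when the bit actually needs to change, instead of unconditionally rewriting the same cache line from every CPU. A generic sketch of the pattern (the helper name here is made up for illustration):

```c
/* Only write to the shared bitmap when the bit really changes, so
 * concurrent updaters stop invalidating each other's cache lines.
 */
static inline void set_bit_if_needed(unsigned long nr, unsigned long *addr, bool want)
{
	if (want) {
		if (!test_bit(nr, addr))
			set_bit(nr, addr);
	} else {
		if (test_bit(nr, addr))
			clear_bit(nr, addr);
	}
}
```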


@@ -88,10 +88,9 @@ static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
 		 */
 		cur_len = min(len, bv.bv_len);
-		iobuf = kmap_atomic(bv.bv_page);
-		err = ndbr->do_io(ndbr, dev_offset, iobuf + bv.bv_offset,
-				cur_len, rw);
-		kunmap_atomic(iobuf);
+		iobuf = bvec_kmap_local(&bv);
+		err = ndbr->do_io(ndbr, dev_offset, iobuf, cur_len, rw);
+		kunmap_local(iobuf);
 		if (err)
 			return err;


@@ -1163,17 +1163,15 @@ static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
 		 */
 		cur_len = min(len, bv.bv_len);
-		mem = kmap_atomic(bv.bv_page);
+		mem = bvec_kmap_local(&bv);
 		if (rw)
-			ret = arena_write_bytes(arena, meta_nsoff,
-					mem + bv.bv_offset, cur_len,
+			ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len,
 					NVDIMM_IO_ATOMIC);
 		else
-			ret = arena_read_bytes(arena, meta_nsoff,
-					mem + bv.bv_offset, cur_len,
+			ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len,
 					NVDIMM_IO_ATOMIC);
-		kunmap_atomic(mem);
+		kunmap_local(mem);
 		if (ret)
 			return ret;


@@ -24,6 +24,14 @@ config NVME_MULTIPATH
 	  /dev/nvmeXnY device will show up for each NVMe namespace,
 	  even if it is accessible through multiple controllers.
+config NVME_VERBOSE_ERRORS
+	bool "NVMe verbose error reporting"
+	depends on NVME_CORE
+	help
+	  This option enables verbose reporting for NVMe errors. The
+	  error translation table will grow the kernel image size by
+	  about 4 KB.
 config NVME_HWMON
 	bool "NVMe hardware monitoring"
 	depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)


@@ -9,7 +9,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
 obj-$(CONFIG_NVME_FC) += nvme-fc.o
 obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
-nvme-core-y := core.o ioctl.o
+nvme-core-y := core.o ioctl.o constants.o
 nvme-core-$(CONFIG_TRACING) += trace.o
 nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
 nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o


@@ -0,0 +1,185 @@
// SPDX-License-Identifier: GPL-2.0
/*
* NVM Express device driver verbose errors
* Copyright (c) 2022, Oracle and/or its affiliates
*/
#include <linux/blkdev.h>
#include "nvme.h"
#ifdef CONFIG_NVME_VERBOSE_ERRORS
static const char * const nvme_ops[] = {
[nvme_cmd_flush] = "Flush",
[nvme_cmd_write] = "Write",
[nvme_cmd_read] = "Read",
[nvme_cmd_write_uncor] = "Write Uncorrectable",
[nvme_cmd_compare] = "Compare",
[nvme_cmd_write_zeroes] = "Write Zeros",
[nvme_cmd_dsm] = "Dataset Management",
[nvme_cmd_verify] = "Verify",
[nvme_cmd_resv_register] = "Reservation Register",
[nvme_cmd_resv_report] = "Reservation Report",
[nvme_cmd_resv_acquire] = "Reservation Acquire",
[nvme_cmd_resv_release] = "Reservation Release",
[nvme_cmd_zone_mgmt_send] = "Zone Management Send",
[nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
[nvme_cmd_zone_append] = "Zone Management Append",
};
static const char * const nvme_admin_ops[] = {
[nvme_admin_delete_sq] = "Delete SQ",
[nvme_admin_create_sq] = "Create SQ",
[nvme_admin_get_log_page] = "Get Log Page",
[nvme_admin_delete_cq] = "Delete CQ",
[nvme_admin_create_cq] = "Create CQ",
[nvme_admin_identify] = "Identify",
[nvme_admin_abort_cmd] = "Abort Command",
[nvme_admin_set_features] = "Set Features",
[nvme_admin_get_features] = "Get Features",
[nvme_admin_async_event] = "Async Event",
[nvme_admin_ns_mgmt] = "Namespace Management",
[nvme_admin_activate_fw] = "Activate Firmware",
[nvme_admin_download_fw] = "Download Firmware",
[nvme_admin_dev_self_test] = "Device Self Test",
[nvme_admin_ns_attach] = "Namespace Attach",
[nvme_admin_keep_alive] = "Keep Alive",
[nvme_admin_directive_send] = "Directive Send",
[nvme_admin_directive_recv] = "Directive Receive",
[nvme_admin_virtual_mgmt] = "Virtual Management",
[nvme_admin_nvme_mi_send] = "NVMe Send MI",
[nvme_admin_nvme_mi_recv] = "NVMe Receive MI",
[nvme_admin_dbbuf] = "Doorbell Buffer Config",
[nvme_admin_format_nvm] = "Format NVM",
[nvme_admin_security_send] = "Security Send",
[nvme_admin_security_recv] = "Security Receive",
[nvme_admin_sanitize_nvm] = "Sanitize NVM",
[nvme_admin_get_lba_status] = "Get LBA Status",
};
static const char * const nvme_statuses[] = {
[NVME_SC_SUCCESS] = "Success",
[NVME_SC_INVALID_OPCODE] = "Invalid Command Opcode",
[NVME_SC_INVALID_FIELD] = "Invalid Field in Command",
[NVME_SC_CMDID_CONFLICT] = "Command ID Conflict",
[NVME_SC_DATA_XFER_ERROR] = "Data Transfer Error",
[NVME_SC_POWER_LOSS] = "Commands Aborted due to Power Loss Notification",
[NVME_SC_INTERNAL] = "Internal Error",
[NVME_SC_ABORT_REQ] = "Command Abort Requested",
[NVME_SC_ABORT_QUEUE] = "Command Aborted due to SQ Deletion",
[NVME_SC_FUSED_FAIL] = "Command Aborted due to Failed Fused Command",
[NVME_SC_FUSED_MISSING] = "Command Aborted due to Missing Fused Command",
[NVME_SC_INVALID_NS] = "Invalid Namespace or Format",
[NVME_SC_CMD_SEQ_ERROR] = "Command Sequence Error",
[NVME_SC_SGL_INVALID_LAST] = "Invalid SGL Segment Descriptor",
[NVME_SC_SGL_INVALID_COUNT] = "Invalid Number of SGL Descriptors",
[NVME_SC_SGL_INVALID_DATA] = "Data SGL Length Invalid",
[NVME_SC_SGL_INVALID_METADATA] = "Metadata SGL Length Invalid",
[NVME_SC_SGL_INVALID_TYPE] = "SGL Descriptor Type Invalid",
[NVME_SC_CMB_INVALID_USE] = "Invalid Use of Controller Memory Buffer",
[NVME_SC_PRP_INVALID_OFFSET] = "PRP Offset Invalid",
[NVME_SC_ATOMIC_WU_EXCEEDED] = "Atomic Write Unit Exceeded",
[NVME_SC_OP_DENIED] = "Operation Denied",
[NVME_SC_SGL_INVALID_OFFSET] = "SGL Offset Invalid",
[NVME_SC_RESERVED] = "Reserved",
[NVME_SC_HOST_ID_INCONSIST] = "Host Identifier Inconsistent Format",
[NVME_SC_KA_TIMEOUT_EXPIRED] = "Keep Alive Timeout Expired",
[NVME_SC_KA_TIMEOUT_INVALID] = "Keep Alive Timeout Invalid",
[NVME_SC_ABORTED_PREEMPT_ABORT] = "Command Aborted due to Preempt and Abort",
[NVME_SC_SANITIZE_FAILED] = "Sanitize Failed",
[NVME_SC_SANITIZE_IN_PROGRESS] = "Sanitize In Progress",
[NVME_SC_SGL_INVALID_GRANULARITY] = "SGL Data Block Granularity Invalid",
[NVME_SC_CMD_NOT_SUP_CMB_QUEUE] = "Command Not Supported for Queue in CMB",
[NVME_SC_NS_WRITE_PROTECTED] = "Namespace is Write Protected",
[NVME_SC_CMD_INTERRUPTED] = "Command Interrupted",
[NVME_SC_TRANSIENT_TR_ERR] = "Transient Transport Error",
[NVME_SC_INVALID_IO_CMD_SET] = "Invalid IO Command Set",
[NVME_SC_LBA_RANGE] = "LBA Out of Range",
[NVME_SC_CAP_EXCEEDED] = "Capacity Exceeded",
[NVME_SC_NS_NOT_READY] = "Namespace Not Ready",
[NVME_SC_RESERVATION_CONFLICT] = "Reservation Conflict",
[NVME_SC_FORMAT_IN_PROGRESS] = "Format In Progress",
[NVME_SC_CQ_INVALID] = "Completion Queue Invalid",
[NVME_SC_QID_INVALID] = "Invalid Queue Identifier",
[NVME_SC_QUEUE_SIZE] = "Invalid Queue Size",
[NVME_SC_ABORT_LIMIT] = "Abort Command Limit Exceeded",
[NVME_SC_ABORT_MISSING] = "Reserved", /* XXX */
[NVME_SC_ASYNC_LIMIT] = "Asynchronous Event Request Limit Exceeded",
[NVME_SC_FIRMWARE_SLOT] = "Invalid Firmware Slot",
[NVME_SC_FIRMWARE_IMAGE] = "Invalid Firmware Image",
[NVME_SC_INVALID_VECTOR] = "Invalid Interrupt Vector",
[NVME_SC_INVALID_LOG_PAGE] = "Invalid Log Page",
[NVME_SC_INVALID_FORMAT] = "Invalid Format",
[NVME_SC_FW_NEEDS_CONV_RESET] = "Firmware Activation Requires Conventional Reset",
[NVME_SC_INVALID_QUEUE] = "Invalid Queue Deletion",
[NVME_SC_FEATURE_NOT_SAVEABLE] = "Feature Identifier Not Saveable",
[NVME_SC_FEATURE_NOT_CHANGEABLE] = "Feature Not Changeable",
[NVME_SC_FEATURE_NOT_PER_NS] = "Feature Not Namespace Specific",
[NVME_SC_FW_NEEDS_SUBSYS_RESET] = "Firmware Activation Requires NVM Subsystem Reset",
[NVME_SC_FW_NEEDS_RESET] = "Firmware Activation Requires Reset",
[NVME_SC_FW_NEEDS_MAX_TIME] = "Firmware Activation Requires Maximum Time Violation",
[NVME_SC_FW_ACTIVATE_PROHIBITED] = "Firmware Activation Prohibited",
[NVME_SC_OVERLAPPING_RANGE] = "Overlapping Range",
[NVME_SC_NS_INSUFFICIENT_CAP] = "Namespace Insufficient Capacity",
[NVME_SC_NS_ID_UNAVAILABLE] = "Namespace Identifier Unavailable",
[NVME_SC_NS_ALREADY_ATTACHED] = "Namespace Already Attached",
[NVME_SC_NS_IS_PRIVATE] = "Namespace Is Private",
[NVME_SC_NS_NOT_ATTACHED] = "Namespace Not Attached",
[NVME_SC_THIN_PROV_NOT_SUPP] = "Thin Provisioning Not Supported",
[NVME_SC_CTRL_LIST_INVALID] = "Controller List Invalid",
[NVME_SC_SELT_TEST_IN_PROGRESS] = "Device Self-test In Progress",
[NVME_SC_BP_WRITE_PROHIBITED] = "Boot Partition Write Prohibited",
[NVME_SC_CTRL_ID_INVALID] = "Invalid Controller Identifier",
[NVME_SC_SEC_CTRL_STATE_INVALID] = "Invalid Secondary Controller State",
[NVME_SC_CTRL_RES_NUM_INVALID] = "Invalid Number of Controller Resources",
[NVME_SC_RES_ID_INVALID] = "Invalid Resource Identifier",
[NVME_SC_PMR_SAN_PROHIBITED] = "Sanitize Prohibited",
[NVME_SC_ANA_GROUP_ID_INVALID] = "ANA Group Identifier Invalid",
[NVME_SC_ANA_ATTACH_FAILED] = "ANA Attach Failed",
[NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes",
[NVME_SC_INVALID_PI] = "Invalid Protection Information",
[NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range",
[NVME_SC_ONCS_NOT_SUPPORTED] = "ONCS Not Supported",
[NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error",
[NVME_SC_ZONE_FULL] = "Zone Is Full",
[NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only",
[NVME_SC_ZONE_OFFLINE] = "Zone Is Offline",
[NVME_SC_ZONE_INVALID_WRITE] = "Zone Invalid Write",
[NVME_SC_ZONE_TOO_MANY_ACTIVE] = "Too Many Active Zones",
[NVME_SC_ZONE_TOO_MANY_OPEN] = "Too Many Open Zones",
[NVME_SC_ZONE_INVALID_TRANSITION] = "Invalid Zone State Transition",
[NVME_SC_WRITE_FAULT] = "Write Fault",
[NVME_SC_READ_ERROR] = "Unrecovered Read Error",
[NVME_SC_GUARD_CHECK] = "End-to-end Guard Check Error",
[NVME_SC_APPTAG_CHECK] = "End-to-end Application Tag Check Error",
[NVME_SC_REFTAG_CHECK] = "End-to-end Reference Tag Check Error",
[NVME_SC_COMPARE_FAILED] = "Compare Failure",
[NVME_SC_ACCESS_DENIED] = "Access Denied",
[NVME_SC_UNWRITTEN_BLOCK] = "Deallocated or Unwritten Logical Block",
[NVME_SC_ANA_PERSISTENT_LOSS] = "Asymmetric Access Persistent Loss",
[NVME_SC_ANA_INACCESSIBLE] = "Asymmetric Access Inaccessible",
[NVME_SC_ANA_TRANSITION] = "Asymmetric Access Transition",
[NVME_SC_HOST_PATH_ERROR] = "Host Pathing Error",
};
const unsigned char *nvme_get_error_status_str(u16 status)
{
status &= 0x7ff;
if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
return nvme_statuses[status & 0x7ff];
return "Unknown";
}
const unsigned char *nvme_get_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode])
return nvme_ops[opcode];
return "Unknown";
}
const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode])
return nvme_admin_ops[opcode];
return "Unknown";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */


@ -299,6 +299,37 @@ static void nvme_retry_req(struct request *req)
blk_mq_delay_kick_requeue_list(req->q, delay); blk_mq_delay_kick_requeue_list(req->q, delay);
} }
static void nvme_log_error(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
struct nvme_request *nr = nvme_req(req);
if (ns) {
pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
ns->disk ? ns->disk->disk_name : "?",
nvme_get_opcode_str(nr->cmd->common.opcode),
nr->cmd->common.opcode,
(unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
(unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
nvme_get_error_status_str(nr->status),
nr->status >> 8 & 7, /* Status Code Type */
nr->status & 0xff, /* Status Code */
nr->status & NVME_SC_MORE ? "MORE " : "",
nr->status & NVME_SC_DNR ? "DNR " : "");
return;
}
pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
dev_name(nr->ctrl->device),
nvme_get_admin_opcode_str(nr->cmd->common.opcode),
nr->cmd->common.opcode,
nvme_get_error_status_str(nr->status),
nr->status >> 8 & 7, /* Status Code Type */
nr->status & 0xff, /* Status Code */
nr->status & NVME_SC_MORE ? "MORE " : "",
nr->status & NVME_SC_DNR ? "DNR " : "");
}
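For orientation, with the masking above a hypothetical status word of 0x4281 would be reported as sct 0x2 / sc 0x81 with DNR (bit 14) set and MORE (bit 13) clear; the human-readable name comes from the nvme_statuses[] table added in constants.c earlier in this merge.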
enum nvme_disposition { enum nvme_disposition {
COMPLETE, COMPLETE,
RETRY, RETRY,
@ -339,6 +370,8 @@ static inline void nvme_end_req(struct request *req)
{ {
blk_status_t status = nvme_error_status(nvme_req(req)->status); blk_status_t status = nvme_error_status(nvme_req(req)->status);
if (unlikely(nvme_req(req)->status != NVME_SC_SUCCESS))
nvme_log_error(req);
nvme_end_req_zoned(req); nvme_end_req_zoned(req);
nvme_trace_bio_complete(req); nvme_trace_bio_complete(req);
blk_mq_end_request(req, status); blk_mq_end_request(req, status);
@ -562,7 +595,7 @@ static void nvme_free_ns_head(struct kref *ref)
container_of(ref, struct nvme_ns_head, ref); container_of(ref, struct nvme_ns_head, ref);
nvme_mpath_remove_disk(head); nvme_mpath_remove_disk(head);
ida_simple_remove(&head->subsys->ns_ida, head->instance); ida_free(&head->subsys->ns_ida, head->instance);
cleanup_srcu_struct(&head->srcu); cleanup_srcu_struct(&head->srcu);
nvme_put_subsystem(head->subsys); nvme_put_subsystem(head->subsys);
kfree(head); kfree(head);
@ -758,6 +791,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
static int nvme_configure_directives(struct nvme_ctrl *ctrl) static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{ {
struct streams_directive_params s; struct streams_directive_params s;
u16 nssa;
int ret; int ret;
if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES)) if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
@ -773,14 +807,16 @@ static int nvme_configure_directives(struct nvme_ctrl *ctrl)
if (ret) if (ret)
goto out_disable_stream; goto out_disable_stream;
ctrl->nssa = le16_to_cpu(s.nssa); nssa = le16_to_cpu(s.nssa);
if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) { if (nssa < BLK_MAX_WRITE_HINTS - 1) {
dev_info(ctrl->device, "too few streams (%u) available\n", dev_info(ctrl->device, "too few streams (%u) available\n",
ctrl->nssa); nssa);
/* this condition is not an error: streams are optional */
ret = 0;
goto out_disable_stream; goto out_disable_stream;
} }
ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1); ctrl->nr_streams = min_t(u16, nssa, BLK_MAX_WRITE_HINTS - 1);
dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams); dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
return 0; return 0;
@ -1050,8 +1086,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
* >0: nvme controller's cqe status response * >0: nvme controller's cqe status response
* <0: kernel error in lieu of controller response * <0: kernel error in lieu of controller response
*/ */
static int nvme_execute_rq(struct gendisk *disk, struct request *rq, static int nvme_execute_rq(struct request *rq, bool at_head)
bool at_head)
{ {
blk_status_t status; blk_status_t status;
@ -1091,7 +1126,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out; goto out;
} }
ret = nvme_execute_rq(NULL, req, at_head); ret = nvme_execute_rq(req, at_head);
if (result && ret >= 0) if (result && ret >= 0)
*result = nvme_req(req)->result; *result = nvme_req(req)->result;
out: out:
@ -1207,12 +1242,11 @@ int nvme_execute_passthru_rq(struct request *rq)
struct nvme_command *cmd = nvme_req(rq)->cmd; struct nvme_command *cmd = nvme_req(rq)->cmd;
struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl; struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
struct nvme_ns *ns = rq->q->queuedata; struct nvme_ns *ns = rq->q->queuedata;
struct gendisk *disk = ns ? ns->disk : NULL;
u32 effects; u32 effects;
int ret; int ret;
effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode); effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
ret = nvme_execute_rq(disk, rq, false); ret = nvme_execute_rq(rq, false);
if (effects) /* nothing to be done for zero cmd effects */ if (effects) /* nothing to be done for zero cmd effects */
nvme_passthru_end(ctrl, effects, cmd, ret); nvme_passthru_end(ctrl, effects, cmd, ret);
@ -1683,13 +1717,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
} }
static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
return !uuid_is_null(&ids->uuid) ||
memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{ {
return uuid_equal(&a->uuid, &b->uuid) && return uuid_equal(&a->uuid, &b->uuid) &&
@ -1977,7 +2004,7 @@ static char nvme_pr_type(enum pr_type type)
default: default:
return 0; return 0;
} }
}; }
static int nvme_send_ns_head_pr_command(struct block_device *bdev, static int nvme_send_ns_head_pr_command(struct block_device *bdev,
struct nvme_command *c, u8 data[16]) struct nvme_command *c, u8 data[16])
@ -2565,7 +2592,7 @@ static void nvme_release_subsystem(struct device *dev)
container_of(dev, struct nvme_subsystem, dev); container_of(dev, struct nvme_subsystem, dev);
if (subsys->instance >= 0) if (subsys->instance >= 0)
ida_simple_remove(&nvme_instance_ida, subsys->instance); ida_free(&nvme_instance_ida, subsys->instance);
kfree(subsys); kfree(subsys);
} }
@ -2990,6 +3017,9 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->max_namespaces = le32_to_cpu(id->mnan); ctrl->max_namespaces = le32_to_cpu(id->mnan);
ctrl->ctratt = le32_to_cpu(id->ctratt); ctrl->ctratt = le32_to_cpu(id->ctratt);
ctrl->cntrltype = id->cntrltype;
ctrl->dctype = id->dctype;
if (id->rtd3e) { if (id->rtd3e) {
/* us -> s */ /* us -> s */
u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
@ -3523,6 +3553,40 @@ static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store); nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
static ssize_t cntrltype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
static const char * const type[] = {
[NVME_CTRL_IO] = "io\n",
[NVME_CTRL_DISC] = "discovery\n",
[NVME_CTRL_ADMIN] = "admin\n",
};
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
return sysfs_emit(buf, "reserved\n");
return sysfs_emit(buf, type[ctrl->cntrltype]);
}
static DEVICE_ATTR_RO(cntrltype);
static ssize_t dctype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
static const char * const type[] = {
[NVME_DCTYPE_NOT_REPORTED] = "none\n",
[NVME_DCTYPE_DDC] = "ddc\n",
[NVME_DCTYPE_CDC] = "cdc\n",
};
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
return sysfs_emit(buf, "reserved\n");
return sysfs_emit(buf, type[ctrl->dctype]);
}
static DEVICE_ATTR_RO(dctype);
static struct attribute *nvme_dev_attrs[] = { static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr, &dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr, &dev_attr_rescan_controller.attr,
@ -3544,6 +3608,8 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reconnect_delay.attr, &dev_attr_reconnect_delay.attr,
&dev_attr_fast_io_fail_tmo.attr, &dev_attr_fast_io_fail_tmo.attr,
&dev_attr_kato.attr, &dev_attr_kato.attr,
&dev_attr_cntrltype.attr,
&dev_attr_dctype.attr,
NULL NULL
}; };
@ -3598,16 +3664,24 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
return NULL; return NULL;
} }
static int __nvme_check_ids(struct nvme_subsystem *subsys, static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
struct nvme_ns_head *new) struct nvme_ns_ids *ids)
{ {
bool has_uuid = !uuid_is_null(&ids->uuid);
bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
struct nvme_ns_head *h; struct nvme_ns_head *h;
lockdep_assert_held(&subsys->lock); lockdep_assert_held(&subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry) { list_for_each_entry(h, &subsys->nsheads, entry) {
if (nvme_ns_ids_valid(&new->ids) && if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
nvme_ns_ids_equal(&new->ids, &h->ids)) return -EINVAL;
if (has_nguid &&
memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
return -EINVAL;
if (has_eui64 &&
memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
return -EINVAL; return -EINVAL;
} }
@ -3616,7 +3690,7 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
static void nvme_cdev_rel(struct device *dev) static void nvme_cdev_rel(struct device *dev)
{ {
ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(dev->devt)); ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
} }
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device) void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
@ -3630,7 +3704,7 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
{ {
int minor, ret; int minor, ret;
minor = ida_simple_get(&nvme_ns_chr_minor_ida, 0, 0, GFP_KERNEL); minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL);
if (minor < 0) if (minor < 0)
return minor; return minor;
cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
@ -3693,7 +3767,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
head = kzalloc(size, GFP_KERNEL); head = kzalloc(size, GFP_KERNEL);
if (!head) if (!head)
goto out; goto out;
ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL); ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
if (ret < 0) if (ret < 0)
goto out_free_head; goto out_free_head;
head->instance = ret; head->instance = ret;
@ -3706,13 +3780,6 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
head->ids = *ids; head->ids = *ids;
kref_init(&head->ref); kref_init(&head->ref);
ret = __nvme_check_ids(ctrl->subsys, head);
if (ret) {
dev_err(ctrl->device,
"duplicate IDs for nsid %d\n", nsid);
goto out_cleanup_srcu;
}
if (head->ids.csi) { if (head->ids.csi) {
ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects); ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
if (ret) if (ret)
@ -3732,7 +3799,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
out_cleanup_srcu: out_cleanup_srcu:
cleanup_srcu_struct(&head->srcu); cleanup_srcu_struct(&head->srcu);
out_ida_remove: out_ida_remove:
ida_simple_remove(&ctrl->subsys->ns_ida, head->instance); ida_free(&ctrl->subsys->ns_ida, head->instance);
out_free_head: out_free_head:
kfree(head); kfree(head);
out: out:
@ -3741,16 +3808,56 @@ out:
return ERR_PTR(ret); return ERR_PTR(ret);
} }
static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
struct nvme_ns_ids *ids)
{
struct nvme_subsystem *s;
int ret = 0;
/*
* Note that this check is racy as we try to avoid holding the global
* lock over the whole ns_head creation. But it is only intended as
* a sanity check anyway.
*/
mutex_lock(&nvme_subsystems_lock);
list_for_each_entry(s, &nvme_subsystems, entry) {
if (s == this)
continue;
mutex_lock(&s->lock);
ret = nvme_subsys_check_duplicate_ids(s, ids);
mutex_unlock(&s->lock);
if (ret)
break;
}
mutex_unlock(&nvme_subsystems_lock);
return ret;
}
static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
struct nvme_ns_ids *ids, bool is_shared) struct nvme_ns_ids *ids, bool is_shared)
{ {
struct nvme_ctrl *ctrl = ns->ctrl; struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_ns_head *head = NULL; struct nvme_ns_head *head = NULL;
int ret = 0; int ret;
ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
if (ret) {
dev_err(ctrl->device,
"globally duplicate IDs for nsid %d\n", nsid);
return ret;
}
mutex_lock(&ctrl->subsys->lock); mutex_lock(&ctrl->subsys->lock);
head = nvme_find_ns_head(ctrl->subsys, nsid); head = nvme_find_ns_head(ctrl->subsys, nsid);
if (!head) { if (!head) {
ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
if (ret) {
dev_err(ctrl->device,
"duplicate IDs in subsystem for nsid %d\n",
nsid);
goto out_unlock;
}
head = nvme_alloc_ns_head(ctrl, nsid, ids); head = nvme_alloc_ns_head(ctrl, nsid, ids);
if (IS_ERR(head)) { if (IS_ERR(head)) {
ret = PTR_ERR(head); ret = PTR_ERR(head);
@ -4229,6 +4336,13 @@ static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
return ret; return ret;
} }
static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
{
char *envp[2] = { envdata, NULL };
kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
}
static void nvme_aen_uevent(struct nvme_ctrl *ctrl) static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{ {
char *envp[2] = { NULL, NULL }; char *envp[2] = { NULL, NULL };
@ -4403,6 +4517,8 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
nvme_queue_scan(ctrl); nvme_queue_scan(ctrl);
nvme_start_queues(ctrl); nvme_start_queues(ctrl);
} }
nvme_change_uevent(ctrl, "NVME_EVENT=connected");
} }
EXPORT_SYMBOL_GPL(nvme_start_ctrl); EXPORT_SYMBOL_GPL(nvme_start_ctrl);
@ -4436,7 +4552,7 @@ static void nvme_free_ctrl(struct device *dev)
struct nvme_subsystem *subsys = ctrl->subsys; struct nvme_subsystem *subsys = ctrl->subsys;
if (!subsys || ctrl->instance != subsys->instance) if (!subsys || ctrl->instance != subsys->instance)
ida_simple_remove(&nvme_instance_ida, ctrl->instance); ida_free(&nvme_instance_ida, ctrl->instance);
nvme_free_cels(ctrl); nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl); nvme_mpath_uninit(ctrl);
@ -4495,7 +4611,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
goto out; goto out;
} }
ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL); ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
if (ret < 0) if (ret < 0)
goto out; goto out;
ctrl->instance = ret; ctrl->instance = ret;
@ -4536,7 +4652,7 @@ out_free_name:
nvme_put_ctrl(ctrl); nvme_put_ctrl(ctrl);
kfree_const(ctrl->device->kobj.name); kfree_const(ctrl->device->kobj.name);
out_release_instance: out_release_instance:
ida_simple_remove(&nvme_instance_ida, ctrl->instance); ida_free(&nvme_instance_ida, ctrl->instance);
out: out:
if (ctrl->discard_page) if (ctrl->discard_page)
__free_page(ctrl->discard_page); __free_page(ctrl->discard_page);


@ -144,11 +144,10 @@ EXPORT_SYMBOL_GPL(nvmf_get_address);
*/ */
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{ {
struct nvme_command cmd; struct nvme_command cmd = { };
union nvme_result res; union nvme_result res;
int ret; int ret;
memset(&cmd, 0, sizeof(cmd));
cmd.prop_get.opcode = nvme_fabrics_command; cmd.prop_get.opcode = nvme_fabrics_command;
cmd.prop_get.fctype = nvme_fabrics_type_property_get; cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off); cmd.prop_get.offset = cpu_to_le32(off);
@ -272,7 +271,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
int err_sctype = errval & ~NVME_SC_DNR; int err_sctype = errval & ~NVME_SC_DNR;
switch (err_sctype) { switch (err_sctype) {
case (NVME_SC_CONNECT_INVALID_PARAM): case NVME_SC_CONNECT_INVALID_PARAM:
if (offset >> 16) { if (offset >> 16) {
char *inv_data = "Connect Invalid Data Parameter"; char *inv_data = "Connect Invalid Data Parameter";
@ -873,7 +872,7 @@ static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
unsigned int required_opts) unsigned int required_opts)
{ {
if ((opts->mask & required_opts) != required_opts) { if ((opts->mask & required_opts) != required_opts) {
int i; unsigned int i;
for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) { for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
if ((opt_tokens[i].token & required_opts) && if ((opt_tokens[i].token & required_opts) &&
@ -923,7 +922,7 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
unsigned int allowed_opts) unsigned int allowed_opts)
{ {
if (opts->mask & ~allowed_opts) { if (opts->mask & ~allowed_opts) {
int i; unsigned int i;
for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) { for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
if ((opt_tokens[i].token & opts->mask) && if ((opt_tokens[i].token & opts->mask) &&


@ -259,7 +259,7 @@ nvme_fc_free_lport(struct kref *ref)
complete(&nvme_fc_unload_proceed); complete(&nvme_fc_unload_proceed);
spin_unlock_irqrestore(&nvme_fc_lock, flags); spin_unlock_irqrestore(&nvme_fc_lock, flags);
ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num); ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
ida_destroy(&lport->endp_cnt); ida_destroy(&lport->endp_cnt);
put_device(lport->dev); put_device(lport->dev);
@ -399,7 +399,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
goto out_reghost_failed; goto out_reghost_failed;
} }
idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL); idx = ida_alloc(&nvme_fc_local_port_cnt, GFP_KERNEL);
if (idx < 0) { if (idx < 0) {
ret = -ENOSPC; ret = -ENOSPC;
goto out_fail_kfree; goto out_fail_kfree;
@ -439,7 +439,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
return 0; return 0;
out_ida_put: out_ida_put:
ida_simple_remove(&nvme_fc_local_port_cnt, idx); ida_free(&nvme_fc_local_port_cnt, idx);
out_fail_kfree: out_fail_kfree:
kfree(newrec); kfree(newrec);
out_reghost_failed: out_reghost_failed:
@ -535,7 +535,7 @@ nvme_fc_free_rport(struct kref *ref)
spin_unlock_irqrestore(&nvme_fc_lock, flags); spin_unlock_irqrestore(&nvme_fc_lock, flags);
WARN_ON(!list_empty(&rport->disc_list)); WARN_ON(!list_empty(&rport->disc_list));
ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num); ida_free(&lport->endp_cnt, rport->remoteport.port_num);
kfree(rport); kfree(rport);
@ -713,7 +713,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
goto out_lport_put; goto out_lport_put;
} }
idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL); idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL);
if (idx < 0) { if (idx < 0) {
ret = -ENOSPC; ret = -ENOSPC;
goto out_kfree_rport; goto out_kfree_rport;
@ -2393,7 +2393,7 @@ nvme_fc_ctrl_free(struct kref *ref)
put_device(ctrl->dev); put_device(ctrl->dev);
nvme_fc_rport_put(ctrl->rport); nvme_fc_rport_put(ctrl->rport);
ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
if (ctrl->ctrl.opts) if (ctrl->ctrl.opts)
nvmf_free_options(ctrl->ctrl.opts); nvmf_free_options(ctrl->ctrl.opts);
kfree(ctrl); kfree(ctrl);
@ -2916,11 +2916,9 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
ctrl->ctrl.tagset = &ctrl->tag_set; ctrl->ctrl.tagset = &ctrl->tag_set;
ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
if (IS_ERR(ctrl->ctrl.connect_q)) { if (ret)
ret = PTR_ERR(ctrl->ctrl.connect_q);
goto out_free_tag_set; goto out_free_tag_set;
}
ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret) if (ret)
@ -3472,7 +3470,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail; goto out_fail;
} }
idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); idx = ida_alloc(&nvme_fc_ctrl_cnt, GFP_KERNEL);
if (idx < 0) { if (idx < 0) {
ret = -ENOSPC; ret = -ENOSPC;
goto out_free_ctrl; goto out_free_ctrl;
@ -3635,7 +3633,7 @@ out_free_queues:
kfree(ctrl->queues); kfree(ctrl->queues);
out_free_ida: out_free_ida:
put_device(ctrl->dev); put_device(ctrl->dev);
ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl: out_free_ctrl:
kfree(ctrl); kfree(ctrl);
out_fail: out_fail:


@ -56,7 +56,7 @@ out:
static int nvme_submit_user_cmd(struct request_queue *q, static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer, struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len, unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, u64 *result, unsigned timeout) u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{ {
bool write = nvme_is_write(cmd); bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata; struct nvme_ns *ns = q->queuedata;
@ -75,8 +75,22 @@ static int nvme_submit_user_cmd(struct request_queue *q,
nvme_req(req)->flags |= NVME_REQ_USERCMD; nvme_req(req)->flags |= NVME_REQ_USERCMD;
if (ubuffer && bufflen) { if (ubuffer && bufflen) {
ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, if (!vec)
ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
GFP_KERNEL); GFP_KERNEL);
else {
struct iovec fast_iov[UIO_FASTIOV];
struct iovec *iov = fast_iov;
struct iov_iter iter;
ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
UIO_FASTIOV, &iov, &iter);
if (ret < 0)
goto out;
ret = blk_rq_map_user_iov(q, req, NULL, &iter,
GFP_KERNEL);
kfree(iov);
}
if (ret) if (ret)
goto out; goto out;
bio = req->bio; bio = req->bio;
@ -170,7 +184,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
return nvme_submit_user_cmd(ns->queue, &c, return nvme_submit_user_cmd(ns->queue, &c,
nvme_to_user_ptr(io.addr), length, nvme_to_user_ptr(io.addr), length,
metadata, meta_len, lower_32_bits(io.slba), NULL, 0); metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
false);
} }
static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
@ -224,7 +239,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len, nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len, nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &result, timeout); 0, &result, timeout, false);
if (status >= 0) { if (status >= 0) {
if (put_user(result, &ucmd->result)) if (put_user(result, &ucmd->result))
@ -235,7 +250,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
} }
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd64 __user *ucmd) struct nvme_passthru_cmd64 __user *ucmd, bool vec)
{ {
struct nvme_passthru_cmd64 cmd; struct nvme_passthru_cmd64 cmd;
struct nvme_command c; struct nvme_command c;
@ -270,7 +285,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len, nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len, nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &cmd.result, timeout); 0, &cmd.result, timeout, vec);
if (status >= 0) { if (status >= 0) {
if (put_user(cmd.result, &ucmd->result)) if (put_user(cmd.result, &ucmd->result))
@ -296,7 +311,7 @@ static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
case NVME_IOCTL_ADMIN_CMD: case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp); return nvme_user_cmd(ctrl, NULL, argp);
case NVME_IOCTL_ADMIN64_CMD: case NVME_IOCTL_ADMIN64_CMD:
return nvme_user_cmd64(ctrl, NULL, argp); return nvme_user_cmd64(ctrl, NULL, argp, false);
default: default:
return sed_ioctl(ctrl->opal_dev, cmd, argp); return sed_ioctl(ctrl->opal_dev, cmd, argp);
} }
@ -340,7 +355,9 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
case NVME_IOCTL_SUBMIT_IO: case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, argp); return nvme_submit_io(ns, argp);
case NVME_IOCTL_IO64_CMD: case NVME_IOCTL_IO64_CMD:
return nvme_user_cmd64(ns->ctrl, ns, argp); return nvme_user_cmd64(ns->ctrl, ns, argp, false);
case NVME_IOCTL_IO64_CMD_VEC:
return nvme_user_cmd64(ns->ctrl, ns, argp, true);
default: default:
return -ENOTTY; return -ENOTTY;
} }
@ -480,7 +497,7 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
case NVME_IOCTL_ADMIN_CMD: case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp); return nvme_user_cmd(ctrl, NULL, argp);
case NVME_IOCTL_ADMIN64_CMD: case NVME_IOCTL_ADMIN64_CMD:
return nvme_user_cmd64(ctrl, NULL, argp); return nvme_user_cmd64(ctrl, NULL, argp, false);
case NVME_IOCTL_IO_CMD: case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp); return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET: case NVME_IOCTL_RESET:
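
Editor's note: the new vectored branch in nvme_submit_user_cmd() relies on an import_iovec() property that is easy to miss: when the user vector fits in the on-stack UIO_FASTIOV array, import_iovec() leaves the returned iov pointer NULL, so the unconditional kfree() is a no-op on that fast path. A stand-alone sketch of the pattern; example_map_user_vec is a hypothetical name and not part of the diff:

#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/uio.h>

/* Hypothetical helper illustrating the pattern used above. */
static int example_map_user_vec(struct request_queue *q, struct request *req,
				const struct iovec __user *uvec,
				unsigned int nr_segs)
{
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov = fast_iov;
	struct iov_iter iter;
	int ret;

	ret = import_iovec(rq_data_dir(req), uvec, nr_segs, UIO_FASTIOV,
			   &iov, &iter);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
	kfree(iov);	/* NULL when fast_iov was used, so always safe */
	return ret;
}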

View File

@ -280,7 +280,6 @@ struct nvme_ctrl {
u16 crdt[3]; u16 crdt[3];
u16 oncs; u16 oncs;
u16 oacs; u16 oacs;
u16 nssa;
u16 nr_streams; u16 nr_streams;
u16 sqsize; u16 sqsize;
u32 max_namespaces; u32 max_namespaces;
@ -349,6 +348,9 @@ struct nvme_ctrl {
unsigned long discard_page_busy; unsigned long discard_page_busy;
struct nvme_fault_inject fault_inject; struct nvme_fault_inject fault_inject;
enum nvme_ctrl_type cntrltype;
enum nvme_dctype dctype;
}; };
enum nvme_iopolicy { enum nvme_iopolicy {
@ -894,6 +896,14 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
} }
#endif #endif
static inline int nvme_ctrl_init_connect_q(struct nvme_ctrl *ctrl)
{
ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ctrl->connect_q))
return PTR_ERR(ctrl->connect_q);
return 0;
}
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev) static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{ {
return dev_to_disk(dev)->private_data; return dev_to_disk(dev)->private_data;
@ -930,4 +940,23 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI; return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
} }
#ifdef CONFIG_NVME_VERBOSE_ERRORS
const unsigned char *nvme_get_error_status_str(u16 status);
const unsigned char *nvme_get_opcode_str(u8 opcode);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
static inline const unsigned char *nvme_get_error_status_str(u16 status)
{
return "I/O Error";
}
static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
{
return "I/O Cmd";
}
static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
return "Admin Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */
#endif /* _NVME_H */ #endif /* _NVME_H */
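
Editor's note: the #ifdef stubs above let callers log opcode and status names unconditionally, without their own CONFIG_NVME_VERBOSE_ERRORS checks. The actual logging helper lives elsewhere in this series, so the following is only a hypothetical sketch of a consumer, assuming the declarations in this header:

/* Hypothetical consumer, not part of this diff. */
static inline void example_log_nvme_error(struct nvme_ctrl *ctrl,
					  struct request *req)
{
	struct nvme_request *nr = nvme_req(req);

	dev_err(ctrl->device, "%s failed: %s\n",
		nvme_get_opcode_str(nr->cmd->common.opcode),
		nvme_get_error_status_str(nr->status));
}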

View File

@ -978,11 +978,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
goto out_free_io_queues; goto out_free_io_queues;
} }
ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
if (IS_ERR(ctrl->ctrl.connect_q)) { if (ret)
ret = PTR_ERR(ctrl->ctrl.connect_q);
goto out_free_tag_set; goto out_free_tag_set;
}
} }
ret = nvme_rdma_start_io_queues(ctrl); ret = nvme_rdma_start_io_queues(ctrl);
@ -1283,6 +1281,22 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
return ib_post_send(queue->qp, &wr, NULL); return ib_post_send(queue->qp, &wr, NULL);
} }
static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
if (blk_integrity_rq(rq)) {
ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
req->metadata_sgl->nents, rq_dma_dir(rq));
sg_free_table_chained(&req->metadata_sgl->sg_table,
NVME_INLINE_METADATA_SG_CNT);
}
ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
rq_dma_dir(rq));
sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
}
static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
struct request *rq) struct request *rq)
{ {
@ -1294,13 +1308,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
if (!blk_rq_nr_phys_segments(rq)) if (!blk_rq_nr_phys_segments(rq))
return; return;
if (blk_integrity_rq(rq)) {
ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
req->metadata_sgl->nents, rq_dma_dir(rq));
sg_free_table_chained(&req->metadata_sgl->sg_table,
NVME_INLINE_METADATA_SG_CNT);
}
if (req->use_sig_mr) if (req->use_sig_mr)
pool = &queue->qp->sig_mrs; pool = &queue->qp->sig_mrs;
@ -1309,9 +1316,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
req->mr = NULL; req->mr = NULL;
} }
ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, nvme_rdma_dma_unmap_req(ibdev, rq);
rq_dma_dir(rq));
sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
} }
static int nvme_rdma_set_sg_null(struct nvme_command *c) static int nvme_rdma_set_sg_null(struct nvme_command *c)
@ -1522,22 +1527,11 @@ mr_put:
return -EINVAL; return -EINVAL;
} }
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
struct request *rq, struct nvme_command *c) int *count, int *pi_count)
{ {
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device; int ret;
struct ib_device *ibdev = dev->dev;
int pi_count = 0;
int count, ret;
req->num_sge = 1;
refcount_set(&req->ref, 2); /* send and recv completions */
c->common.flags |= NVME_CMD_SGL_METABUF;
if (!blk_rq_nr_phys_segments(rq))
return nvme_rdma_set_sg_null(c);
req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1); req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
ret = sg_alloc_table_chained(&req->data_sgl.sg_table, ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
@ -1549,9 +1543,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->data_sgl.nents = blk_rq_map_sg(rq->q, rq, req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
req->data_sgl.sg_table.sgl); req->data_sgl.sg_table.sgl);
count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl, *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
req->data_sgl.nents, rq_dma_dir(rq)); req->data_sgl.nents, rq_dma_dir(rq));
if (unlikely(count <= 0)) { if (unlikely(*count <= 0)) {
ret = -EIO; ret = -EIO;
goto out_free_table; goto out_free_table;
} }
@ -1570,16 +1564,50 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q, req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
rq->bio, req->metadata_sgl->sg_table.sgl); rq->bio, req->metadata_sgl->sg_table.sgl);
pi_count = ib_dma_map_sg(ibdev, *pi_count = ib_dma_map_sg(ibdev,
req->metadata_sgl->sg_table.sgl, req->metadata_sgl->sg_table.sgl,
req->metadata_sgl->nents, req->metadata_sgl->nents,
rq_dma_dir(rq)); rq_dma_dir(rq));
if (unlikely(pi_count <= 0)) { if (unlikely(*pi_count <= 0)) {
ret = -EIO; ret = -EIO;
goto out_free_pi_table; goto out_free_pi_table;
} }
} }
return 0;
out_free_pi_table:
sg_free_table_chained(&req->metadata_sgl->sg_table,
NVME_INLINE_METADATA_SG_CNT);
out_unmap_sg:
ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
rq_dma_dir(rq));
out_free_table:
sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
return ret;
}
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
struct request *rq, struct nvme_command *c)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
int pi_count = 0;
int count, ret;
req->num_sge = 1;
refcount_set(&req->ref, 2); /* send and recv completions */
c->common.flags |= NVME_CMD_SGL_METABUF;
if (!blk_rq_nr_phys_segments(rq))
return nvme_rdma_set_sg_null(c);
ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
if (unlikely(ret))
return ret;
if (req->use_sig_mr) { if (req->use_sig_mr) {
ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count); ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
goto out; goto out;
@ -1603,23 +1631,12 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
ret = nvme_rdma_map_sg_fr(queue, req, c, count); ret = nvme_rdma_map_sg_fr(queue, req, c, count);
out: out:
if (unlikely(ret)) if (unlikely(ret))
goto out_unmap_pi_sg; goto out_dma_unmap_req;
return 0; return 0;
out_unmap_pi_sg: out_dma_unmap_req:
if (blk_integrity_rq(rq)) nvme_rdma_dma_unmap_req(ibdev, rq);
ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
req->metadata_sgl->nents, rq_dma_dir(rq));
out_free_pi_table:
if (blk_integrity_rq(rq))
sg_free_table_chained(&req->metadata_sgl->sg_table,
NVME_INLINE_METADATA_SG_CNT);
out_unmap_sg:
ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
rq_dma_dir(rq));
out_free_table:
sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
return ret; return ret;
} }

View File

@ -1867,11 +1867,9 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
goto out_free_io_queues; goto out_free_io_queues;
} }
ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); ret = nvme_ctrl_init_connect_q(ctrl);
if (IS_ERR(ctrl->connect_q)) { if (ret)
ret = PTR_ERR(ctrl->connect_q);
goto out_free_tag_set; goto out_free_tag_set;
}
} }
ret = nvme_tcp_start_io_queues(ctrl); ret = nvme_tcp_start_io_queues(ctrl);

View File

@ -1400,7 +1400,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (subsys->cntlid_min > subsys->cntlid_max) if (subsys->cntlid_min > subsys->cntlid_max)
goto out_free_sqs; goto out_free_sqs;
ret = ida_simple_get(&cntlid_ida, ret = ida_alloc_range(&cntlid_ida,
subsys->cntlid_min, subsys->cntlid_max, subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL); GFP_KERNEL);
if (ret < 0) { if (ret < 0) {
@ -1459,7 +1459,7 @@ static void nvmet_ctrl_free(struct kref *ref)
flush_work(&ctrl->async_event_work); flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work); cancel_work_sync(&ctrl->fatal_err_work);
ida_simple_remove(&cntlid_ida, ctrl->cntlid); ida_free(&cntlid_ida, ctrl->cntlid);
nvmet_async_events_free(ctrl); nvmet_async_events_free(ctrl);
kfree(ctrl->sqs); kfree(ctrl->sqs);

View File

@ -1115,7 +1115,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
if (!assoc) if (!assoc)
return NULL; return NULL;
idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL); idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
if (idx < 0) if (idx < 0)
goto out_free_assoc; goto out_free_assoc;
@ -1157,7 +1157,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
out_put: out_put:
nvmet_fc_tgtport_put(tgtport); nvmet_fc_tgtport_put(tgtport);
out_ida: out_ida:
ida_simple_remove(&tgtport->assoc_cnt, idx); ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc: out_free_assoc:
kfree(assoc); kfree(assoc);
return NULL; return NULL;
@ -1183,7 +1183,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
/* if pending Rcv Disconnect Association LS, send rsp now */ /* if pending Rcv Disconnect Association LS, send rsp now */
if (oldls) if (oldls)
nvmet_fc_xmt_ls_rsp(tgtport, oldls); nvmet_fc_xmt_ls_rsp(tgtport, oldls);
ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id); ida_free(&tgtport->assoc_cnt, assoc->a_id);
dev_info(tgtport->dev, dev_info(tgtport->dev,
"{%d:%d} Association freed\n", "{%d:%d} Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id); tgtport->fc_target_port.port_num, assoc->a_id);
@ -1383,7 +1383,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
goto out_regtgt_failed; goto out_regtgt_failed;
} }
idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL); idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
if (idx < 0) { if (idx < 0) {
ret = -ENOSPC; ret = -ENOSPC;
goto out_fail_kfree; goto out_fail_kfree;
@ -1433,7 +1433,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
out_free_newrec: out_free_newrec:
put_device(dev); put_device(dev);
out_ida_put: out_ida_put:
ida_simple_remove(&nvmet_fc_tgtport_cnt, idx); ida_free(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree: out_fail_kfree:
kfree(newrec); kfree(newrec);
out_regtgt_failed: out_regtgt_failed:
@ -1460,7 +1460,7 @@ nvmet_fc_free_tgtport(struct kref *ref)
/* let the LLDD know we've finished tearing it down */ /* let the LLDD know we've finished tearing it down */
tgtport->ops->targetport_delete(&tgtport->fc_target_port); tgtport->ops->targetport_delete(&tgtport->fc_target_port);
ida_simple_remove(&nvmet_fc_tgtport_cnt, ida_free(&nvmet_fc_tgtport_cnt,
tgtport->fc_target_port.port_num); tgtport->fc_target_port.port_num);
ida_destroy(&tgtport->assoc_cnt); ida_destroy(&tgtport->assoc_cnt);

View File

@ -76,6 +76,14 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{ {
int ret; int ret;
/*
* When the buffered_io namespace attribute is enabled, the user wants
* this block device to be used as a file, so the I/O can take
* advantage of the page cache.
*/
if (ns->buffered_io)
return -ENOTBLK;
ns->bdev = blkdev_get_by_path(ns->device_path, ns->bdev = blkdev_get_by_path(ns->device_path,
FMODE_READ | FMODE_WRITE, NULL); FMODE_READ | FMODE_WRITE, NULL);
if (IS_ERR(ns->bdev)) { if (IS_ERR(ns->bdev)) {
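
Editor's note on the -ENOTBLK return above: the existing namespace-enable path already falls back from the bdev backend to the file backend on that error, which is how a buffered_io namespace ends up going through the page cache. A paraphrased sketch; example_enable_backend is a hypothetical wrapper, the real logic sits in nvmet_ns_enable() outside this hunk:

/* Paraphrase of the existing fallback, for illustration only. */
static int example_enable_backend(struct nvmet_ns *ns)
{
	int ret;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)	/* buffered_io set: use the file backend */
		ret = nvmet_file_ns_enable(ns);
	return ret;
}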

View File

@ -14,16 +14,9 @@
#define NVMET_MAX_MPOOL_BVEC 16 #define NVMET_MAX_MPOOL_BVEC 16
#define NVMET_MIN_MPOOL_OBJ 16 #define NVMET_MIN_MPOOL_OBJ 16
int nvmet_file_ns_revalidate(struct nvmet_ns *ns) void nvmet_file_ns_revalidate(struct nvmet_ns *ns)
{ {
struct kstat stat; ns->size = i_size_read(ns->file->f_mapping->host);
int ret;
ret = vfs_getattr(&ns->file->f_path, &stat, STATX_SIZE,
AT_STATX_FORCE_SYNC);
if (!ret)
ns->size = stat.size;
return ret;
} }
void nvmet_file_ns_disable(struct nvmet_ns *ns) void nvmet_file_ns_disable(struct nvmet_ns *ns)
@ -43,7 +36,7 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
int nvmet_file_ns_enable(struct nvmet_ns *ns) int nvmet_file_ns_enable(struct nvmet_ns *ns)
{ {
int flags = O_RDWR | O_LARGEFILE; int flags = O_RDWR | O_LARGEFILE;
int ret; int ret = 0;
if (!ns->buffered_io) if (!ns->buffered_io)
flags |= O_DIRECT; flags |= O_DIRECT;
@ -57,9 +50,7 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
return ret; return ret;
} }
ret = nvmet_file_ns_revalidate(ns); nvmet_file_ns_revalidate(ns);
if (ret)
goto err;
/* /*
* i_blkbits can be greater than the universally accepted upper bound, * i_blkbits can be greater than the universally accepted upper bound,

View File

@ -543,11 +543,9 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
if (ret) if (ret)
goto out_destroy_queues; goto out_destroy_queues;
ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
if (IS_ERR(ctrl->ctrl.connect_q)) { if (ret)
ret = PTR_ERR(ctrl->ctrl.connect_q);
goto out_free_tagset; goto out_free_tagset;
}
ret = nvme_loop_connect_io_queues(ctrl); ret = nvme_loop_connect_io_queues(ctrl);
if (ret) if (ret)

View File

@ -541,7 +541,7 @@ u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req); u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid); void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns); void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
int nvmet_file_ns_revalidate(struct nvmet_ns *ns); void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
void nvmet_ns_revalidate(struct nvmet_ns *ns); void nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts); u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

View File

@ -1356,7 +1356,7 @@ static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
!queue->host_qid); !queue->host_qid);
} }
nvmet_rdma_free_rsps(queue); nvmet_rdma_free_rsps(queue);
ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx); ida_free(&nvmet_rdma_queue_ida, queue->idx);
kfree(queue); kfree(queue);
} }
@ -1459,7 +1459,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
spin_lock_init(&queue->rsps_lock); spin_lock_init(&queue->rsps_lock);
INIT_LIST_HEAD(&queue->queue_list); INIT_LIST_HEAD(&queue->queue_list);
queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL);
if (queue->idx < 0) { if (queue->idx < 0) {
ret = NVME_RDMA_CM_NO_RSC; ret = NVME_RDMA_CM_NO_RSC;
goto out_destroy_sq; goto out_destroy_sq;
@ -1510,7 +1510,7 @@ out_free_cmds:
out_free_responses: out_free_responses:
nvmet_rdma_free_rsps(queue); nvmet_rdma_free_rsps(queue);
out_ida_remove: out_ida_remove:
ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx); ida_free(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq: out_destroy_sq:
nvmet_sq_destroy(&queue->nvme_sq); nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue: out_free_queue:

View File

@ -1473,7 +1473,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
nvmet_tcp_free_cmds(queue); nvmet_tcp_free_cmds(queue);
if (queue->hdr_digest || queue->data_digest) if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue); nvmet_tcp_free_crypto(queue);
ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx); ida_free(&nvmet_tcp_queue_ida, queue->idx);
page = virt_to_head_page(queue->pf_cache.va); page = virt_to_head_page(queue->pf_cache.va);
__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
@ -1613,7 +1613,7 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
init_llist_head(&queue->resp_list); init_llist_head(&queue->resp_list);
INIT_LIST_HEAD(&queue->resp_send_list); INIT_LIST_HEAD(&queue->resp_send_list);
queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL); queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
if (queue->idx < 0) { if (queue->idx < 0) {
ret = queue->idx; ret = queue->idx;
goto out_free_queue; goto out_free_queue;
@ -1646,7 +1646,7 @@ out_destroy_sq:
out_free_connect: out_free_connect:
nvmet_tcp_free_cmd(&queue->connect); nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove: out_ida_remove:
ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx); ida_free(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue: out_free_queue:
kfree(queue); kfree(queue);
return ret; return ret;

View File

@ -721,7 +721,7 @@ enum {
* *
* Fields with static values for the port. Initialized by the * Fields with static values for the port. Initialized by the
* port_info struct supplied to the registration call. * port_info struct supplied to the registration call.
* @port_num: NVME-FC transport subsytem port number * @port_num: NVME-FC transport subsystem port number
* @node_name: FC WWNN for the port * @node_name: FC WWNN for the port
* @port_name: FC WWPN for the port * @port_name: FC WWPN for the port
* @private: pointer to memory allocated alongside the local port * @private: pointer to memory allocated alongside the local port

View File

@ -43,6 +43,12 @@ enum nvme_ctrl_type {
NVME_CTRL_ADMIN = 3, /* Administrative controller */ NVME_CTRL_ADMIN = 3, /* Administrative controller */
}; };
enum nvme_dctype {
NVME_DCTYPE_NOT_REPORTED = 0,
NVME_DCTYPE_DDC = 1, /* Direct Discovery Controller */
NVME_DCTYPE_CDC = 2, /* Central Discovery Controller */
};
/* Address Family codes for Discovery Log Page entry ADRFAM field */ /* Address Family codes for Discovery Log Page entry ADRFAM field */
enum { enum {
NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */ NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */
@ -320,7 +326,9 @@ struct nvme_id_ctrl {
__le16 icdoff; __le16 icdoff;
__u8 ctrattr; __u8 ctrattr;
__u8 msdbd; __u8 msdbd;
__u8 rsvd1804[244]; __u8 rsvd1804[2];
__u8 dctype;
__u8 rsvd1807[241];
struct nvme_id_power_state psd[32]; struct nvme_id_power_state psd[32];
__u8 vs[1024]; __u8 vs[1024];
}; };
@ -1636,6 +1644,7 @@ enum {
NVME_SC_HOST_ABORTED_CMD = 0x371, NVME_SC_HOST_ABORTED_CMD = 0x371,
NVME_SC_CRD = 0x1800, NVME_SC_CRD = 0x1800,
NVME_SC_MORE = 0x2000,
NVME_SC_DNR = 0x4000, NVME_SC_DNR = 0x4000,
}; };

View File

@ -55,7 +55,10 @@ struct nvme_passthru_cmd64 {
__u64 metadata; __u64 metadata;
__u64 addr; __u64 addr;
__u32 metadata_len; __u32 metadata_len;
__u32 data_len; union {
__u32 data_len; /* for non-vectored io */
__u32 vec_cnt; /* for vectored io */
};
__u32 cdw10; __u32 cdw10;
__u32 cdw11; __u32 cdw11;
__u32 cdw12; __u32 cdw12;
@ -78,5 +81,6 @@ struct nvme_passthru_cmd64 {
#define NVME_IOCTL_RESCAN _IO('N', 0x46) #define NVME_IOCTL_RESCAN _IO('N', 0x46)
#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64) #define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64)
#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64) #define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64)
#define NVME_IOCTL_IO64_CMD_VEC _IOWR('N', 0x49, struct nvme_passthru_cmd64)
#endif /* _UAPI_LINUX_NVME_IOCTL_H */ #endif /* _UAPI_LINUX_NVME_IOCTL_H */
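
Editor's note: with the anonymous union and the new ioctl number above, addr points at an iovec array and vec_cnt carries the number of vectors rather than a byte count. A minimal userspace sketch under stated assumptions: /dev/nvme0n1 and namespace 1 are example names, the format is assumed to use 4096-byte LBAs, and CAP_SYS_ADMIN is typically required:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	char a[4096], b[4096];			/* assumes a 4096-byte LBA format */
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	struct nvme_passthru_cmd64 cmd = {
		.opcode  = 0x02,			/* NVMe Read */
		.nsid    = 1,				/* example namespace */
		.addr    = (__u64)(uintptr_t)iov,	/* iovec array, not a flat buffer */
		.vec_cnt = 2,				/* number of vectors, per the union above */
		.cdw10   = 0,				/* SLBA, low 32 bits */
		.cdw11   = 0,				/* SLBA, high 32 bits */
		.cdw12   = 1,				/* NLB is zero-based: two blocks */
	};
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* example device node */

	if (fd < 0 || ioctl(fd, NVME_IOCTL_IO64_CMD_VEC, &cmd) < 0) {
		perror("vectored passthru");
		return 1;
	}
	printf("read %zu bytes across 2 vectors\n", sizeof(a) + sizeof(b));
	close(fd);
	return 0;
}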