commit 236f255296

nvme updates for Linux 6.5

 - Various cleanups all around (Irvin, Chaitanya, Christophe)
 - Better struct packing (Christophe JAILLET)
 - Reduce controller error logs for optional commands (Keith)
 - Support for >=64KiB block sizes (Daniel Gomez)
 - Fabrics fixes and code organization (Max, Chaitanya, Daniel Wagner)

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE3Fbyvv+648XNRdHTPe3zGtjzRgkFAmSMgAcACgkQPe3zGtjz
Rgn0jhAAjhZrLdxsKd+mp4a9ywrg+P04sjYfxwfEu8A+J+OMDlgQXFrNzKGS5Riy
WRF674sJih2KSjfuG8m0J1piRGfKph2+YjAsiwl2mxBuwZi8ckUU7QiOKYa4HudE
DHOW9x7+mB+liGF4LvhWVBgjm0lWST7SQjGHwABToorDQ/10H6lyQR6CJHpyOwwQ
LYVu19aU2mAnlMuExW1OpiE+mLD1QLmElAorYc+32hKCmeY7fOgheJk9gx4sx0lm
bSrlinCLOJX3V4Ny7OIpYOH3KVFGBmxRQtrmnTyG6oSa3JCp8yTe5ytc9zhGCnli
cNobu681XB6api7JMaPKmJk/vNfSxFlw9nndaX9u3PGLCv7RumUkMZm4eXtYmaBN
FVLX3Jj0Ru4ULsbaA5WmHCo56bb1EoTA5bTuWNfVylILD71TfhzaeTkMwJ8izanV
VlS0cZAiaEvlyrFznCirEpkNFmAgTp/XaeiV8lPbXMbvYtouoHHb+ex8uxxiSPBq
lM8gKL28L3ZrGZZ29bIKqU3WJjt4d3bckHSytKA2keLxRbixV7tmlwaYkQF9PSCW
bbgJ5knjJixYnN8jm8QEdtBhMOYpULK3sOeReFmJD73o5NHq+UkrdxzoQQhaa3ai
a81wZZhDk5R1EJZehia7bQR1USXWXtjFoWsbc54bh9EG8cR/spk=
=wMci
-----END PGP SIGNATURE-----

Merge tag 'nvme-6.5-2023-06-16' of git://git.infradead.org/nvme into for-6.5/block

Pull NVMe updates from Keith:
"nvme updates for Linux 6.5

 - Various cleanups all around (Irvin, Chaitanya, Christophe)
 - Better struct packing (Christophe JAILLET)
 - Reduce controller error logs for optional commands (Keith)
 - Support for >=64KiB block sizes (Daniel Gomez)
 - Fabrics fixes and code organization (Max, Chaitanya, Daniel Wagner)"

* tag 'nvme-6.5-2023-06-16' of git://git.infradead.org/nvme: (27 commits)
  nvme: forward port sysfs delete fix
  nvme: skip optional id ctrl csi if it failed
  nvme-core: use nvme_ns_head_multipath instead of ns->head->disk
  nvmet-fcloop: Do not wait on completion when unregister fails
  nvme-fabrics: open code __nvmf_host_find()
  nvme-fabrics: error out to unlock the mutex
  nvme: Increase block size variable size to 32-bit
  nvme-fcloop: no need to return from void function
  nvmet-auth: remove unnecessary break after goto
  nvmet-auth: remove some dead code
  nvme-core: remove redundant check from nvme_init_ns_head
  nvme: move sysfs code to a dedicated sysfs.c file
  nvme-fabrics: prevent overriding of existing host
  nvme-fabrics: check hostid using uuid_equal
  nvme-fabrics: unify common code in admin and io queue connect
  nvmet: reorder fields in 'struct nvmefc_fcp_req'
  nvmet: reorder fields in 'struct nvme_dhchap_queue_context'
  nvmet: reorder fields in 'struct nvmf_ctrl_options'
  nvme: reorder fields in 'struct nvme_ctrl'
  nvmet: reorder fields in 'struct nvmet_sq'
  ...
drivers/nvme/host/Makefile

@@ -10,7 +10,7 @@ obj-$(CONFIG_NVME_FC) += nvme-fc.o
 obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
 obj-$(CONFIG_NVME_APPLE) += nvme-apple.o
 
-nvme-core-y += core.o ioctl.o
+nvme-core-y += core.o ioctl.o sysfs.o
 nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o
 nvme-core-$(CONFIG_TRACING) += trace.o
 nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
drivers/nvme/host/auth.c

@@ -30,18 +30,18 @@ struct nvme_dhchap_queue_context {
 	u32 s2;
 	u16 transaction;
 	u8 status;
+	u8 dhgroup_id;
 	u8 hash_id;
 	size_t hash_len;
-	u8 dhgroup_id;
 	u8 c1[64];
 	u8 c2[64];
 	u8 response[64];
 	u8 *host_response;
 	u8 *ctrl_key;
-	int ctrl_key_len;
 	u8 *host_key;
-	int host_key_len;
 	u8 *sess_key;
+	int ctrl_key_len;
+	int host_key_len;
 	int sess_key_len;
 };
 
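The hunk above is one of the "better struct packing" changes: grouping the u8 members together and moving the int length fields behind the pointers lets the compiler drop alignment padding. A standalone userspace sketch of the effect, with the field set abbreviated (this is an illustration, not the driver's actual definition):

    #include <stdio.h>
    #include <stddef.h>

    /* Padded: each u8 that precedes an 8-byte member drags in 7 bytes of padding. */
    struct before {
    	unsigned char  hash_id;
    	size_t         hash_len;
    	unsigned char  dhgroup_id;
    	unsigned char *ctrl_key;
    	int            ctrl_key_len;
    	unsigned char *host_key;
    	int            host_key_len;
    };

    /* Packed: bytes grouped together, the ints grouped after the pointers. */
    struct after {
    	unsigned char  hash_id;
    	unsigned char  dhgroup_id;
    	size_t         hash_len;
    	unsigned char *ctrl_key;
    	unsigned char *host_key;
    	int            ctrl_key_len;
    	int            host_key_len;
    };

    int main(void)
    {
    	/* On common LP64 targets this prints 56 and 40. */
    	printf("before=%zu after=%zu\n",
    	       sizeof(struct before), sizeof(struct after));
    	return 0;
    }

The same content in fewer bytes per queue context; tools such as pahole show exactly which holes a given reorder closes.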
drivers/nvme/host/core.c

@@ -237,7 +237,7 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
 
-static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
+void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
 {
 	/*
 	 * Keep a reference until nvme_do_delete_ctrl() complete,
@@ -1835,7 +1835,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
 		struct nvme_ns *ns, struct nvme_id_ns *id)
 {
 	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
-	unsigned short bs = 1 << ns->lba_shift;
+	u32 bs = 1U << ns->lba_shift;
 	u32 atomic_bs, phys_bs, io_opt = 0;
 
 	/*
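Widening bs is what the ">=64KiB block sizes" bullet refers to: an unsigned short cannot hold 1 << 16, so a 64 KiB LBA format used to truncate the block size to zero. A minimal demonstration:

    #include <stdio.h>

    int main(void)
    {
    	int lba_shift = 16;                    /* 64 KiB logical blocks */
    	unsigned short bs16 = 1 << lba_shift;  /* truncates to 0 */
    	unsigned int   bs32 = 1U << lba_shift; /* 65536, as intended */

    	printf("u16: %u, u32: %u\n", bs16, bs32);
    	return 0;
    }

The 1U literal also keeps the shift itself in unsigned arithmetic.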
@@ -2256,7 +2256,7 @@ static int nvme_report_zones(struct gendisk *disk, sector_t sector,
 #define nvme_report_zones	NULL
 #endif /* CONFIG_BLK_DEV_ZONED */
 
-static const struct block_device_operations nvme_bdev_ops = {
+const struct block_device_operations nvme_bdev_ops = {
 	.owner		= THIS_MODULE,
 	.ioctl		= nvme_ioctl,
 	.compat_ioctl	= blkdev_compat_ptr_ioctl,
@@ -2791,75 +2791,6 @@ static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
 	return NULL;
 }
 
-#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
-	struct device_attribute subsys_attr_##_name = \
-		__ATTR(_name, _mode, _show, NULL)
-
-static ssize_t nvme_subsys_show_nqn(struct device *dev,
-				    struct device_attribute *attr,
-				    char *buf)
-{
-	struct nvme_subsystem *subsys =
-		container_of(dev, struct nvme_subsystem, dev);
-
-	return sysfs_emit(buf, "%s\n", subsys->subnqn);
-}
-static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
-
-static ssize_t nvme_subsys_show_type(struct device *dev,
-				     struct device_attribute *attr,
-				     char *buf)
-{
-	struct nvme_subsystem *subsys =
-		container_of(dev, struct nvme_subsystem, dev);
-
-	switch (subsys->subtype) {
-	case NVME_NQN_DISC:
-		return sysfs_emit(buf, "discovery\n");
-	case NVME_NQN_NVME:
-		return sysfs_emit(buf, "nvm\n");
-	default:
-		return sysfs_emit(buf, "reserved\n");
-	}
-}
-static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
-
-#define nvme_subsys_show_str_function(field)				\
-static ssize_t subsys_##field##_show(struct device *dev,		\
-			    struct device_attribute *attr, char *buf)	\
-{									\
-	struct nvme_subsystem *subsys =					\
-		container_of(dev, struct nvme_subsystem, dev);		\
-	return sysfs_emit(buf, "%.*s\n",				\
-			  (int)sizeof(subsys->field), subsys->field);	\
-}									\
-static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
-
-nvme_subsys_show_str_function(model);
-nvme_subsys_show_str_function(serial);
-nvme_subsys_show_str_function(firmware_rev);
-
-static struct attribute *nvme_subsys_attrs[] = {
-	&subsys_attr_model.attr,
-	&subsys_attr_serial.attr,
-	&subsys_attr_firmware_rev.attr,
-	&subsys_attr_subsysnqn.attr,
-	&subsys_attr_subsystype.attr,
-#ifdef CONFIG_NVME_MULTIPATH
-	&subsys_attr_iopolicy.attr,
-#endif
-	NULL,
-};
-
-static const struct attribute_group nvme_subsys_attrs_group = {
-	.attrs = nvme_subsys_attrs,
-};
-
-static const struct attribute_group *nvme_subsys_attrs_groups[] = {
-	&nvme_subsys_attrs_group,
-	NULL,
-};
-
 static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
 {
 	return ctrl->opts && ctrl->opts->discovery_nqn;
@@ -3064,7 +2995,8 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
 	ctrl->max_zeroes_sectors = 0;
 
 	if (ctrl->subsys->subtype != NVME_NQN_NVME ||
-	    nvme_ctrl_limited_cns(ctrl))
+	    nvme_ctrl_limited_cns(ctrl) ||
+	    test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
 		return 0;
 
 	id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -3086,6 +3018,8 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
 	ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
 
 free_data:
+	if (ret > 0)
+		set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags);
 	kfree(id);
 	return ret;
 }
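These two hunks implement the "reduce controller error logs for optional commands" item: a positive return value here is an NVMe status code, meaning the controller rejected the optional Identify, so the new flag records the refusal and the next rescan skips the command rather than re-issuing it and logging the same failure again. A hedged userspace sketch of the same remember-the-failure pattern (all names invented for illustration):

    #include <stdio.h>

    #define CTRL_SKIP_OPTIONAL_ID 0x1UL

    struct ctrl {
    	unsigned long flags;
    };

    /* Stand-in for the optional Identify; pretend the device rejects it. */
    static int issue_optional_identify(struct ctrl *c)
    {
    	return 1; /* > 0 mimics an NVMe status (command failed) */
    }

    static int init_limits(struct ctrl *c)
    {
    	int ret;

    	if (c->flags & CTRL_SKIP_OPTIONAL_ID)
    		return 0;          /* device already said no: stay quiet */

    	ret = issue_optional_identify(c);
    	if (ret > 0) {
    		/* remember the refusal so rescans don't re-log it */
    		c->flags |= CTRL_SKIP_OPTIONAL_ID;
    		fprintf(stderr, "optional identify not supported\n");
    	}
    	return ret;
    }

    int main(void)
    {
    	struct ctrl c = { 0 };

    	init_limits(&c);   /* logs once */
    	init_limits(&c);   /* silently skipped */
    	return 0;
    }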
@@ -3393,583 +3327,6 @@ static const struct file_operations nvme_dev_fops = {
 	.uring_cmd	= nvme_dev_uring_cmd,
 };
 
-static ssize_t nvme_sysfs_reset(struct device *dev,
-				struct device_attribute *attr, const char *buf,
-				size_t count)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-	int ret;
-
-	ret = nvme_reset_ctrl_sync(ctrl);
-	if (ret < 0)
-		return ret;
-	return count;
-}
-static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
-
-static ssize_t nvme_sysfs_rescan(struct device *dev,
-				struct device_attribute *attr, const char *buf,
-				size_t count)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
-	nvme_queue_scan(ctrl);
-	return count;
-}
-static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
-
-static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
-{
-	struct gendisk *disk = dev_to_disk(dev);
-
-	if (disk->fops == &nvme_bdev_ops)
-		return nvme_get_ns_from_dev(dev)->head;
-	else
-		return disk->private_data;
-}
-
-static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
-		char *buf)
-{
-	struct nvme_ns_head *head = dev_to_ns_head(dev);
-	struct nvme_ns_ids *ids = &head->ids;
-	struct nvme_subsystem *subsys = head->subsys;
-	int serial_len = sizeof(subsys->serial);
-	int model_len = sizeof(subsys->model);
-
-	if (!uuid_is_null(&ids->uuid))
-		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);
-
-	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
-		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);
-
-	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
-		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);
-
-	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
-				  subsys->serial[serial_len - 1] == '\0'))
-		serial_len--;
-	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
-				 subsys->model[model_len - 1] == '\0'))
-		model_len--;
-
-	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
-		serial_len, subsys->serial, model_len, subsys->model,
-		head->ns_id);
-}
-static DEVICE_ATTR_RO(wwid);
-
-static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
-		char *buf)
-{
-	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
-}
-static DEVICE_ATTR_RO(nguid);
-
-static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
-		char *buf)
-{
-	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
-
-	/* For backward compatibility expose the NGUID to userspace if
-	 * we have no UUID set
-	 */
-	if (uuid_is_null(&ids->uuid)) {
-		dev_warn_ratelimited(dev,
-			"No UUID available providing old NGUID\n");
-		return sysfs_emit(buf, "%pU\n", ids->nguid);
-	}
-	return sysfs_emit(buf, "%pU\n", &ids->uuid);
-}
-static DEVICE_ATTR_RO(uuid);
-
-static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
-		char *buf)
-{
-	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
-}
-static DEVICE_ATTR_RO(eui);
-
-static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
-		char *buf)
-{
-	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
-}
-static DEVICE_ATTR_RO(nsid);
-
-static struct attribute *nvme_ns_id_attrs[] = {
-	&dev_attr_wwid.attr,
-	&dev_attr_uuid.attr,
-	&dev_attr_nguid.attr,
-	&dev_attr_eui.attr,
-	&dev_attr_nsid.attr,
-#ifdef CONFIG_NVME_MULTIPATH
-	&dev_attr_ana_grpid.attr,
-	&dev_attr_ana_state.attr,
-#endif
-	NULL,
-};
-
-static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
-		struct attribute *a, int n)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
-
-	if (a == &dev_attr_uuid.attr) {
-		if (uuid_is_null(&ids->uuid) &&
-		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
-			return 0;
-	}
-	if (a == &dev_attr_nguid.attr) {
-		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
-			return 0;
-	}
-	if (a == &dev_attr_eui.attr) {
-		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
-			return 0;
-	}
-#ifdef CONFIG_NVME_MULTIPATH
-	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
-		if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
-			return 0;
-		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
-			return 0;
-	}
-#endif
-	return a->mode;
-}
-
-static const struct attribute_group nvme_ns_id_attr_group = {
-	.attrs		= nvme_ns_id_attrs,
-	.is_visible	= nvme_ns_id_attrs_are_visible,
-};
-
-const struct attribute_group *nvme_ns_id_attr_groups[] = {
-	&nvme_ns_id_attr_group,
-	NULL,
-};
-
-#define nvme_show_str_function(field)					\
-static ssize_t field##_show(struct device *dev,				\
-			    struct device_attribute *attr, char *buf)	\
-{									\
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
-	return sysfs_emit(buf, "%.*s\n",				\
-		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);	\
-}									\
-static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
-
-nvme_show_str_function(model);
-nvme_show_str_function(serial);
-nvme_show_str_function(firmware_rev);
-
-#define nvme_show_int_function(field)					\
-static ssize_t field##_show(struct device *dev,				\
-			    struct device_attribute *attr, char *buf)	\
-{									\
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
-	return sysfs_emit(buf, "%d\n", ctrl->field);			\
-}									\
-static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
-
-nvme_show_int_function(cntlid);
-nvme_show_int_function(numa_node);
-nvme_show_int_function(queue_count);
-nvme_show_int_function(sqsize);
-nvme_show_int_function(kato);
-
-static ssize_t nvme_sysfs_delete(struct device *dev,
-				struct device_attribute *attr, const char *buf,
-				size_t count)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
-	if (device_remove_file_self(dev, attr))
-		nvme_delete_ctrl_sync(ctrl);
-	return count;
-}
-static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
-
-static ssize_t nvme_sysfs_show_transport(struct device *dev,
-					 struct device_attribute *attr,
-					 char *buf)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
-	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
-}
-static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
-
-static ssize_t nvme_sysfs_show_state(struct device *dev,
-				     struct device_attribute *attr,
-				     char *buf)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-	static const char *const state_name[] = {
-		[NVME_CTRL_NEW]		= "new",
-		[NVME_CTRL_LIVE]	= "live",
-		[NVME_CTRL_RESETTING]	= "resetting",
-		[NVME_CTRL_CONNECTING]	= "connecting",
-		[NVME_CTRL_DELETING]	= "deleting",
-		[NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
-		[NVME_CTRL_DEAD]	= "dead",
-	};
-
-	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
-	    state_name[ctrl->state])
-		return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
-
-	return sysfs_emit(buf, "unknown state\n");
-}
-
-static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
-
-static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
-					 struct device_attribute *attr,
-					 char *buf)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
-	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
-}
-static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
-
-static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
-					struct device_attribute *attr,
-					char *buf)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
-	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
-}
-static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
-
-static ssize_t nvme_sysfs_show_hostid(struct device *dev,
-					struct device_attribute *attr,
-					char *buf)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
-	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
-}
-static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
-
-static ssize_t nvme_sysfs_show_address(struct device *dev,
-					 struct device_attribute *attr,
-					 char *buf)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
-	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
-}
-static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
-
-static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-	struct nvmf_ctrl_options *opts = ctrl->opts;
-
-	if (ctrl->opts->max_reconnects == -1)
-		return sysfs_emit(buf, "off\n");
-	return sysfs_emit(buf, "%d\n",
-			  opts->max_reconnects * opts->reconnect_delay);
-}
-
-static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-	struct nvmf_ctrl_options *opts = ctrl->opts;
-	int ctrl_loss_tmo, err;
-
-	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
-	if (err)
-		return -EINVAL;
-
-	if (ctrl_loss_tmo < 0)
-		opts->max_reconnects = -1;
-	else
-		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
-					opts->reconnect_delay);
-	return count;
-}
-static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
-	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
-
-static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
-	if (ctrl->opts->reconnect_delay == -1)
-		return sysfs_emit(buf, "off\n");
-	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
-}
-
-static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-	unsigned int v;
-	int err;
-
-	err = kstrtou32(buf, 10, &v);
-	if (err)
-		return err;
-
-	ctrl->opts->reconnect_delay = v;
-	return count;
-}
-static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
-	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
-
-static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
-	if (ctrl->opts->fast_io_fail_tmo == -1)
-		return sysfs_emit(buf, "off\n");
-	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
-}
-
-static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-	struct nvmf_ctrl_options *opts = ctrl->opts;
-	int fast_io_fail_tmo, err;
-
-	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
-	if (err)
-		return -EINVAL;
-
-	if (fast_io_fail_tmo < 0)
-		opts->fast_io_fail_tmo = -1;
-	else
-		opts->fast_io_fail_tmo = fast_io_fail_tmo;
-	return count;
-}
-static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
-	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
-
-static ssize_t cntrltype_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	static const char * const type[] = {
-		[NVME_CTRL_IO] = "io\n",
-		[NVME_CTRL_DISC] = "discovery\n",
-		[NVME_CTRL_ADMIN] = "admin\n",
-	};
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
-	if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
-		return sysfs_emit(buf, "reserved\n");
-
-	return sysfs_emit(buf, type[ctrl->cntrltype]);
-}
-static DEVICE_ATTR_RO(cntrltype);
-
-static ssize_t dctype_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	static const char * const type[] = {
-		[NVME_DCTYPE_NOT_REPORTED] = "none\n",
-		[NVME_DCTYPE_DDC] = "ddc\n",
-		[NVME_DCTYPE_CDC] = "cdc\n",
-	};
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
-	if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
-		return sysfs_emit(buf, "reserved\n");
-
-	return sysfs_emit(buf, type[ctrl->dctype]);
-}
-static DEVICE_ATTR_RO(dctype);
-
-#ifdef CONFIG_NVME_AUTH
-static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-	struct nvmf_ctrl_options *opts = ctrl->opts;
-
-	if (!opts->dhchap_secret)
-		return sysfs_emit(buf, "none\n");
-	return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
-}
-
-static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-	struct nvmf_ctrl_options *opts = ctrl->opts;
-	char *dhchap_secret;
-
-	if (!ctrl->opts->dhchap_secret)
-		return -EINVAL;
-	if (count < 7)
-		return -EINVAL;
-	if (memcmp(buf, "DHHC-1:", 7))
-		return -EINVAL;
-
-	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
-	if (!dhchap_secret)
-		return -ENOMEM;
-	memcpy(dhchap_secret, buf, count);
-	nvme_auth_stop(ctrl);
-	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
-		struct nvme_dhchap_key *key, *host_key;
-		int ret;
-
-		ret = nvme_auth_generate_key(dhchap_secret, &key);
-		if (ret)
-			return ret;
-		kfree(opts->dhchap_secret);
-		opts->dhchap_secret = dhchap_secret;
-		host_key = ctrl->host_key;
-		mutex_lock(&ctrl->dhchap_auth_mutex);
-		ctrl->host_key = key;
-		mutex_unlock(&ctrl->dhchap_auth_mutex);
-		nvme_auth_free_key(host_key);
-	}
-	/* Start re-authentication */
-	dev_info(ctrl->device, "re-authenticating controller\n");
-	queue_work(nvme_wq, &ctrl->dhchap_auth_work);
-
-	return count;
-}
-static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
-	nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
-
-static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-	struct nvmf_ctrl_options *opts = ctrl->opts;
-
-	if (!opts->dhchap_ctrl_secret)
-		return sysfs_emit(buf, "none\n");
-	return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
-}
-
-static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-	struct nvmf_ctrl_options *opts = ctrl->opts;
-	char *dhchap_secret;
-
-	if (!ctrl->opts->dhchap_ctrl_secret)
-		return -EINVAL;
-	if (count < 7)
-		return -EINVAL;
-	if (memcmp(buf, "DHHC-1:", 7))
-		return -EINVAL;
-
-	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
-	if (!dhchap_secret)
-		return -ENOMEM;
-	memcpy(dhchap_secret, buf, count);
-	nvme_auth_stop(ctrl);
-	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
-		struct nvme_dhchap_key *key, *ctrl_key;
-		int ret;
-
-		ret = nvme_auth_generate_key(dhchap_secret, &key);
-		if (ret)
-			return ret;
-		kfree(opts->dhchap_ctrl_secret);
-		opts->dhchap_ctrl_secret = dhchap_secret;
-		ctrl_key = ctrl->ctrl_key;
-		mutex_lock(&ctrl->dhchap_auth_mutex);
-		ctrl->ctrl_key = key;
-		mutex_unlock(&ctrl->dhchap_auth_mutex);
-		nvme_auth_free_key(ctrl_key);
-	}
-	/* Start re-authentication */
-	dev_info(ctrl->device, "re-authenticating controller\n");
-	queue_work(nvme_wq, &ctrl->dhchap_auth_work);
-
-	return count;
-}
-static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
-	nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
-#endif
-
-static struct attribute *nvme_dev_attrs[] = {
-	&dev_attr_reset_controller.attr,
-	&dev_attr_rescan_controller.attr,
-	&dev_attr_model.attr,
-	&dev_attr_serial.attr,
-	&dev_attr_firmware_rev.attr,
-	&dev_attr_cntlid.attr,
-	&dev_attr_delete_controller.attr,
-	&dev_attr_transport.attr,
-	&dev_attr_subsysnqn.attr,
-	&dev_attr_address.attr,
-	&dev_attr_state.attr,
-	&dev_attr_numa_node.attr,
-	&dev_attr_queue_count.attr,
-	&dev_attr_sqsize.attr,
-	&dev_attr_hostnqn.attr,
-	&dev_attr_hostid.attr,
-	&dev_attr_ctrl_loss_tmo.attr,
-	&dev_attr_reconnect_delay.attr,
-	&dev_attr_fast_io_fail_tmo.attr,
-	&dev_attr_kato.attr,
-	&dev_attr_cntrltype.attr,
-	&dev_attr_dctype.attr,
-#ifdef CONFIG_NVME_AUTH
-	&dev_attr_dhchap_secret.attr,
-	&dev_attr_dhchap_ctrl_secret.attr,
-#endif
-	NULL
-};
-
-static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
-		struct attribute *a, int n)
-{
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
-	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
-		return 0;
-	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
-		return 0;
-	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
-		return 0;
-	if (a == &dev_attr_hostid.attr && !ctrl->opts)
-		return 0;
-	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
-		return 0;
-	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
-		return 0;
-	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
-		return 0;
-#ifdef CONFIG_NVME_AUTH
-	if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
-		return 0;
-	if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
-		return 0;
-#endif
-
-	return a->mode;
-}
-
-const struct attribute_group nvme_dev_attrs_group = {
-	.attrs		= nvme_dev_attrs,
-	.is_visible	= nvme_dev_attrs_are_visible,
-};
-EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);
-
-static const struct attribute_group *nvme_dev_attr_groups[] = {
-	&nvme_dev_attrs_group,
-	NULL,
-};
-
 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
 		unsigned nsid)
 {
@@ -4209,7 +3566,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
 		goto out_put_ns_head;
 	}
 
-	if (!multipath && !list_empty(&head->list)) {
+	if (!multipath) {
 		dev_warn(ctrl->device,
 			"Found shared namespace %d, but multipathing not supported.\n",
 			info->nsid);
@@ -4310,7 +3667,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
 	 * instance as shared namespaces will show up as multiple block
 	 * devices.
 	 */
-	if (ns->head->disk) {
+	if (nvme_ns_head_multipath(ns->head)) {
 		sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
 			ctrl->instance, ns->head->instance);
 		disk->flags |= GENHD_FL_HIDDEN;
@@ -5195,6 +4552,8 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 
 	return 0;
 out_free_cdev:
+	nvme_fault_inject_fini(&ctrl->fault_inject);
+	dev_pm_qos_hide_latency_tolerance(ctrl->device);
 	cdev_device_del(&ctrl->cdev, ctrl->device);
 out_free_name:
 	nvme_put_ctrl(ctrl);
drivers/nvme/host/fabrics.c

@@ -21,35 +21,60 @@ static DEFINE_MUTEX(nvmf_hosts_mutex);
 
 static struct nvmf_host *nvmf_default_host;
 
-static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
+static struct nvmf_host *nvmf_host_alloc(const char *hostnqn, uuid_t *id)
 {
 	struct nvmf_host *host;
 
-	list_for_each_entry(host, &nvmf_hosts, list) {
-		if (!strcmp(host->nqn, hostnqn))
-			return host;
-	}
+	host = kmalloc(sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return NULL;
 
-	return NULL;
+	kref_init(&host->ref);
+	uuid_copy(&host->id, id);
+	strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+
+	return host;
 }
 
-static struct nvmf_host *nvmf_host_add(const char *hostnqn)
+static struct nvmf_host *nvmf_host_add(const char *hostnqn, uuid_t *id)
 {
 	struct nvmf_host *host;
 
 	mutex_lock(&nvmf_hosts_mutex);
-	host = __nvmf_host_find(hostnqn);
-	if (host) {
-		kref_get(&host->ref);
-		goto out_unlock;
+
+	/*
+	 * We have defined a host as how it is perceived by the target.
+	 * Therefore, we don't allow different Host NQNs with the same Host ID.
+	 * Similarly, we do not allow the usage of the same Host NQN with
+	 * different Host IDs. This'll maintain unambiguous host identification.
+	 */
+	list_for_each_entry(host, &nvmf_hosts, list) {
+		bool same_hostnqn = !strcmp(host->nqn, hostnqn);
+		bool same_hostid = uuid_equal(&host->id, id);
+
+		if (same_hostnqn && same_hostid) {
+			kref_get(&host->ref);
+			goto out_unlock;
+		}
+		if (same_hostnqn) {
+			pr_err("found same hostnqn %s but different hostid %pUb\n",
+			       hostnqn, id);
+			host = ERR_PTR(-EINVAL);
+			goto out_unlock;
+		}
+		if (same_hostid) {
+			pr_err("found same hostid %pUb but different hostnqn %s\n",
+			       id, hostnqn);
+			host = ERR_PTR(-EINVAL);
+			goto out_unlock;
+		}
 	}
 
-	host = kmalloc(sizeof(*host), GFP_KERNEL);
-	if (!host)
+	host = nvmf_host_alloc(hostnqn, id);
+	if (!host) {
+		host = ERR_PTR(-ENOMEM);
 		goto out_unlock;
-
-	kref_init(&host->ref);
-	strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+	}
 
 	list_add_tail(&host->list, &nvmf_hosts);
 out_unlock:
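The rewritten nvmf_host_add() enforces a one-to-one pairing of Host NQN and Host ID: an exact match on both keys reuses the registered host, while a match on only one of the two is ambiguous and fails with -EINVAL. The decision logic, restated as a minimal sketch:

    #include <stdbool.h>

    enum host_match { HOST_REUSE, HOST_REJECT, HOST_NO_MATCH };

    /*
     * Decision implemented by the nvmf_host_add() loop above:
     * both keys equal means reuse; exactly one equal is ambiguous, reject.
     */
    static enum host_match classify(bool same_hostnqn, bool same_hostid)
    {
    	if (same_hostnqn && same_hostid)
    		return HOST_REUSE;      /* kref_get() the existing host */
    	if (same_hostnqn || same_hostid)
    		return HOST_REJECT;     /* ERR_PTR(-EINVAL) */
    	return HOST_NO_MATCH;           /* keep scanning the list */
    }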
@@ -60,16 +85,17 @@ out_unlock:
 static struct nvmf_host *nvmf_host_default(void)
 {
 	struct nvmf_host *host;
+	char nqn[NVMF_NQN_SIZE];
+	uuid_t id;
+
+	uuid_gen(&id);
+	snprintf(nqn, NVMF_NQN_SIZE,
+		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);
 
-	host = kmalloc(sizeof(*host), GFP_KERNEL);
+	host = nvmf_host_alloc(nqn, &id);
 	if (!host)
 		return NULL;
 
-	kref_init(&host->ref);
-	uuid_gen(&host->id);
-	snprintf(host->nqn, NVMF_NQN_SIZE,
-		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
-
 	mutex_lock(&nvmf_hosts_mutex);
 	list_add_tail(&host->list, &nvmf_hosts);
 	mutex_unlock(&nvmf_hosts_mutex);
@@ -349,6 +375,45 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
 	}
 }
 
+static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl,
+		u16 cntlid)
+{
+	struct nvmf_connect_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	uuid_copy(&data->hostid, &ctrl->opts->host->id);
+	data->cntlid = cpu_to_le16(cntlid);
+	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+
+	return data;
+}
+
+static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid,
+		struct nvme_command *cmd)
+{
+	cmd->connect.opcode = nvme_fabrics_command;
+	cmd->connect.fctype = nvme_fabrics_type_connect;
+	cmd->connect.qid = cpu_to_le16(qid);
+
+	if (qid) {
+		cmd->connect.sqsize = cpu_to_le16(ctrl->sqsize);
+	} else {
+		cmd->connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
+
+		/*
+		 * set keep-alive timeout in seconds granularity (ms * 1000)
+		 */
+		cmd->connect.kato = cpu_to_le32(ctrl->kato * 1000);
+	}
+
+	if (ctrl->opts->disable_sqflow)
+		cmd->connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+}
+
 /**
  * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
  *	API function.
@@ -377,28 +442,12 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	int ret;
 	u32 result;
 
-	cmd.connect.opcode = nvme_fabrics_command;
-	cmd.connect.fctype = nvme_fabrics_type_connect;
-	cmd.connect.qid = 0;
-	cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
-
-	/*
-	 * Set keep-alive timeout in seconds granularity (ms * 1000)
-	 */
-	cmd.connect.kato = cpu_to_le32(ctrl->kato * 1000);
-
-	if (ctrl->opts->disable_sqflow)
-		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+	nvmf_connect_cmd_prep(ctrl, 0, &cmd);
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	data = nvmf_connect_data_prep(ctrl, 0xffff);
 	if (!data)
 		return -ENOMEM;
 
-	uuid_copy(&data->hostid, &ctrl->opts->host->id);
-	data->cntlid = cpu_to_le16(0xffff);
-	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
-	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
-
 	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
 			data, sizeof(*data), NVME_QID_ANY, 1,
 			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
@@ -468,23 +517,12 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	int ret;
 	u32 result;
 
-	cmd.connect.opcode = nvme_fabrics_command;
-	cmd.connect.fctype = nvme_fabrics_type_connect;
-	cmd.connect.qid = cpu_to_le16(qid);
-	cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
-
-	if (ctrl->opts->disable_sqflow)
-		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+	nvmf_connect_cmd_prep(ctrl, qid, &cmd);
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	data = nvmf_connect_data_prep(ctrl, ctrl->cntlid);
 	if (!data)
 		return -ENOMEM;
 
-	uuid_copy(&data->hostid, &ctrl->opts->host->id);
-	data->cntlid = cpu_to_le16(ctrl->cntlid);
-	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
-	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
-
 	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
 			data, sizeof(*data), qid, 1,
 			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
@@ -621,6 +659,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 	size_t nqnlen = 0;
 	int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
 	uuid_t hostid;
+	char hostnqn[NVMF_NQN_SIZE];
 
 	/* Set defaults */
 	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
@@ -637,7 +676,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 	if (!options)
 		return -ENOMEM;
 
-	uuid_gen(&hostid);
+	/* use default host if not given by user space */
+	uuid_copy(&hostid, &nvmf_default_host->id);
+	strscpy(hostnqn, nvmf_default_host->nqn, NVMF_NQN_SIZE);
 
 	while ((p = strsep(&o, ",\n")) != NULL) {
 		if (!*p)
@@ -783,12 +824,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -EINVAL;
 				goto out;
 			}
-			opts->host = nvmf_host_add(p);
+			strscpy(hostnqn, p, NVMF_NQN_SIZE);
 			kfree(p);
-			if (!opts->host) {
-				ret = -ENOMEM;
-				goto out;
-			}
 			break;
 		case NVMF_OPT_RECONNECT_DELAY:
 			if (match_int(args, &token)) {
@@ -945,18 +982,94 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 			opts->fast_io_fail_tmo, ctrl_loss_tmo);
 	}
 
-	if (!opts->host) {
-		kref_get(&nvmf_default_host->ref);
-		opts->host = nvmf_default_host;
+	opts->host = nvmf_host_add(hostnqn, &hostid);
+	if (IS_ERR(opts->host)) {
+		ret = PTR_ERR(opts->host);
+		opts->host = NULL;
+		goto out;
 	}
 
-	uuid_copy(&opts->host->id, &hostid);
-
 out:
 	kfree(options);
 	return ret;
 }
 
+void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
+			u32 io_queues[HCTX_MAX_TYPES])
+{
+	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
+		/*
+		 * separate read/write queues
+		 * hand out dedicated default queues only after we have
+		 * sufficient read queues.
+		 */
+		io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
+		nr_io_queues -= io_queues[HCTX_TYPE_READ];
+		io_queues[HCTX_TYPE_DEFAULT] =
+			min(opts->nr_write_queues, nr_io_queues);
+		nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
+	} else {
+		/*
+		 * shared read/write queues
+		 * either no write queues were requested, or we don't have
+		 * sufficient queue count to have dedicated default queues.
+		 */
+		io_queues[HCTX_TYPE_DEFAULT] =
+			min(opts->nr_io_queues, nr_io_queues);
+		nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
+	}
+
+	if (opts->nr_poll_queues && nr_io_queues) {
+		/* map dedicated poll queues only if we have queues left */
+		io_queues[HCTX_TYPE_POLL] =
+			min(opts->nr_poll_queues, nr_io_queues);
+	}
+}
+EXPORT_SYMBOL_GPL(nvmf_set_io_queues);
+
+void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
+		     u32 io_queues[HCTX_MAX_TYPES])
+{
+	struct nvmf_ctrl_options *opts = ctrl->opts;
+
+	if (opts->nr_write_queues && io_queues[HCTX_TYPE_READ]) {
+		/* separate read/write queues */
+		set->map[HCTX_TYPE_DEFAULT].nr_queues =
+			io_queues[HCTX_TYPE_DEFAULT];
+		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+		set->map[HCTX_TYPE_READ].nr_queues =
+			io_queues[HCTX_TYPE_READ];
+		set->map[HCTX_TYPE_READ].queue_offset =
+			io_queues[HCTX_TYPE_DEFAULT];
+	} else {
+		/* shared read/write queues */
+		set->map[HCTX_TYPE_DEFAULT].nr_queues =
+			io_queues[HCTX_TYPE_DEFAULT];
+		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+		set->map[HCTX_TYPE_READ].nr_queues =
+			io_queues[HCTX_TYPE_DEFAULT];
+		set->map[HCTX_TYPE_READ].queue_offset = 0;
+	}
+
+	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
+	if (opts->nr_poll_queues && io_queues[HCTX_TYPE_POLL]) {
+		/* map dedicated poll queues only if we have queues left */
+		set->map[HCTX_TYPE_POLL].nr_queues = io_queues[HCTX_TYPE_POLL];
+		set->map[HCTX_TYPE_POLL].queue_offset =
+			io_queues[HCTX_TYPE_DEFAULT] +
+			io_queues[HCTX_TYPE_READ];
+		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
+	}
+
+	dev_info(ctrl->device,
+		"mapped %d/%d/%d default/read/poll queues.\n",
+		io_queues[HCTX_TYPE_DEFAULT],
+		io_queues[HCTX_TYPE_READ],
+		io_queues[HCTX_TYPE_POLL]);
+}
+EXPORT_SYMBOL_GPL(nvmf_map_queues);
+
 static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
 		unsigned int required_opts)
 {
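nvmf_set_io_queues() and nvmf_map_queues() lift queue-distribution code that the transports previously duplicated (the rdma.c hunks below delete one copy). As a worked example: with 8 granted queues and 4 read, 2 write, 2 poll queues requested, reads get 4, defaults get min(2, 4) = 2, and polls get min(2, 2) = 2. A standalone restatement of the same arithmetic (simplified, not the kernel code):

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Mirrors nvmf_set_io_queues(): split the granted queues by type. */
    static void set_io_queues(unsigned reads, unsigned writes, unsigned polls,
    			  unsigned granted, unsigned out[3] /* def/read/poll */)
    {
    	out[0] = out[1] = out[2] = 0;

    	if (writes && reads < granted) {
    		/* dedicated read queues first, then default (write) queues */
    		out[1] = reads;
    		granted -= out[1];
    		out[0] = MIN(writes, granted);
    		granted -= out[0];
    	} else {
    		/* shared read/write queues */
    		out[0] = MIN(reads, granted);
    		granted -= out[0];
    	}
    	if (polls && granted)
    		out[2] = MIN(polls, granted);
    }

    int main(void)
    {
    	unsigned q[3];

    	set_io_queues(4, 2, 2, 8, q);
    	printf("default=%u read=%u poll=%u\n", q[0], q[1], q[2]); /* 2 4 2 */
    	return 0;
    }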
drivers/nvme/host/fabrics.h

@@ -77,6 +77,9 @@ enum {
  *	with the parsing opts enum.
  * @mask:	Used by the fabrics library to parse through sysfs options
  *		on adding a NVMe controller.
+ * @max_reconnects: maximum number of allowed reconnect attempts before removing
+ *		the controller, (-1) means reconnect forever, zero means remove
+ *		immediately;
  * @transport:	Holds the fabric transport "technology name" (for a lack of
  *		better description) that will be used by an NVMe controller
  *		being added.
@@ -96,9 +99,6 @@ enum {
  * @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN.
  * @kato:	Keep-alive timeout.
  * @host:	Virtual NVMe host, contains the NQN and Host ID.
- * @max_reconnects: maximum number of allowed reconnect attempts before removing
- *		the controller, (-1) means reconnect forever, zero means remove
- *		immediately;
  * @dhchap_secret: DH-HMAC-CHAP secret
  * @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
  *		authentication
@@ -112,6 +112,7 @@ enum {
  */
 struct nvmf_ctrl_options {
 	unsigned		mask;
+	int			max_reconnects;
 	char			*transport;
 	char			*subsysnqn;
 	char			*traddr;
@@ -125,7 +126,6 @@ struct nvmf_ctrl_options {
 	bool			duplicate_connect;
 	unsigned int		kato;
 	struct nvmf_host	*host;
-	int			max_reconnects;
 	char			*dhchap_secret;
 	char			*dhchap_ctrl_secret;
 	bool			disable_sqflow;
@@ -181,7 +181,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
 	    ctrl->state == NVME_CTRL_DEAD ||
 	    strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
 	    strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
-	    memcmp(&opts->host->id, &ctrl->opts->host->id, sizeof(uuid_t)))
+	    !uuid_equal(&opts->host->id, &ctrl->opts->host->id))
 		return false;
 
 	return true;
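uuid_equal() states the intent more directly than the open-coded memcmp() and is typed, so both arguments must really be uuid_t pointers. To my understanding the kernel helper is a thin wrapper over the same byte-wise comparison; an equivalent sketch:

    #include <stdbool.h>
    #include <string.h>

    typedef struct { unsigned char b[16]; } uuid_t;

    /* Equivalent of the kernel helper: byte-wise comparison of two UUIDs. */
    static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2)
    {
    	return memcmp(u1, u2, sizeof(uuid_t)) == 0;
    }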
@@ -203,6 +203,13 @@ static inline void nvmf_complete_timed_out_request(struct request *rq)
 	}
 }
 
+static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts)
+{
+	return min(opts->nr_io_queues, num_online_cpus()) +
+		min(opts->nr_write_queues, num_online_cpus()) +
+		min(opts->nr_poll_queues, num_online_cpus());
+}
+
 int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
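nvmf_nr_io_queues() caps each requested queue class at the online CPU count before summing, so on a 4-CPU host asking for 8 I/O, 2 write and 1 poll queue the driver requests min(8,4) + min(2,4) + min(1,4) = 7 queues. A quick standalone check:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static unsigned nr_io_queues(unsigned io, unsigned write, unsigned poll,
    			     unsigned online_cpus)
    {
    	return MIN(io, online_cpus) + MIN(write, online_cpus) +
    	       MIN(poll, online_cpus);
    }

    int main(void)
    {
    	printf("%u\n", nr_io_queues(8, 2, 1, 4)); /* prints 7 */
    	return 0;
    }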
@@ -215,5 +222,9 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
 		struct nvmf_ctrl_options *opts);
+void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
+			u32 io_queues[HCTX_MAX_TYPES]);
+void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
+		     u32 io_queues[HCTX_MAX_TYPES]);
 
 #endif /* _NVME_FABRICS_H */
drivers/nvme/host/nvme.h

@@ -242,12 +242,13 @@ enum nvme_ctrl_flags {
 	NVME_CTRL_ADMIN_Q_STOPPED	= 1,
 	NVME_CTRL_STARTED_ONCE		= 2,
 	NVME_CTRL_STOPPED		= 3,
+	NVME_CTRL_SKIP_ID_CNS_CS	= 4,
 };
 
 struct nvme_ctrl {
 	bool comp_seen;
-	enum nvme_ctrl_state state;
 	bool identified;
+	enum nvme_ctrl_state state;
 	spinlock_t lock;
 	struct mutex scan_lock;
 	const struct nvme_ctrl_ops *ops;
@@ -279,8 +280,8 @@ struct nvme_ctrl {
 	char name[12];
 	u16 cntlid;
 
-	u32 ctrl_config;
 	u16 mtfa;
+	u32 ctrl_config;
 	u32 queue_count;
 
 	u64 cap;
@@ -353,10 +354,10 @@ struct nvme_ctrl {
 	bool apst_enabled;
 
 	/* PCIe only: */
+	u16 hmmaxd;
 	u32 hmpre;
 	u32 hmmin;
 	u32 hmminds;
-	u16 hmmaxd;
 
 	/* Fabrics only */
 	u32 ioccsz;
@@ -860,7 +861,11 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[];
 extern const struct pr_ops nvme_pr_ops;
 extern const struct block_device_operations nvme_ns_head_ops;
 extern const struct attribute_group nvme_dev_attrs_group;
+extern const struct attribute_group *nvme_subsys_attrs_groups[];
+extern const struct attribute_group *nvme_dev_attr_groups[];
+extern const struct block_device_operations nvme_bdev_ops;
+
+void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
 struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
 #ifdef CONFIG_NVME_MULTIPATH
 static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
drivers/nvme/host/pci.c

@@ -420,10 +420,9 @@ static int nvme_pci_init_request(struct blk_mq_tag_set *set,
 		struct request *req, unsigned int hctx_idx,
 		unsigned int numa_node)
 {
-	struct nvme_dev *dev = to_nvme_dev(set->driver_data);
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-	nvme_req(req)->ctrl = &dev->ctrl;
+	nvme_req(req)->ctrl = set->driver_data;
 	nvme_req(req)->cmd = &iod->cmd;
 	return 0;
 }
drivers/nvme/host/rdma.c

@@ -501,7 +501,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 	}
 	ibdev = queue->device->dev;
 
-	/* +1 for ib_stop_cq */
+	/* +1 for ib_drain_qp */
 	queue->cq_size = cq_factor * queue->queue_size + 1;
 
 	ret = nvme_rdma_create_cq(ibdev, queue);
@@ -713,18 +713,10 @@ out_stop_queues:
 static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
-	struct ib_device *ibdev = ctrl->device->dev;
-	unsigned int nr_io_queues, nr_default_queues;
-	unsigned int nr_read_queues, nr_poll_queues;
+	unsigned int nr_io_queues;
 	int i, ret;
 
-	nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors,
-				min(opts->nr_io_queues, num_online_cpus()));
-	nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors,
-				min(opts->nr_write_queues, num_online_cpus()));
-	nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus());
-	nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues;
-
+	nr_io_queues = nvmf_nr_io_queues(opts);
 	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
 	if (ret)
 		return ret;
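nvmf_nr_io_queues() itself is not shown in this pull. Judging from the open-coded arithmetic it replaces here (and the identical nvme_tcp_nr_io_queues() removed later in the series), the shared fabrics helper plausibly amounts to the sketch below; note that the ibdev->num_comp_vectors clamp visible in the removed RDMA code has no generic equivalent:

/* sketch, derived from the removed per-transport code */
static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts)
{
	return min(opts->nr_io_queues, num_online_cpus()) +
	       min(opts->nr_write_queues, num_online_cpus()) +
	       min(opts->nr_poll_queues, num_online_cpus());
}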
@@ -739,34 +731,7 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 	dev_info(ctrl->ctrl.device,
 		"creating %d I/O queues.\n", nr_io_queues);
 
-	if (opts->nr_write_queues && nr_read_queues < nr_io_queues) {
-		/*
-		 * separate read/write queues
-		 * hand out dedicated default queues only after we have
-		 * sufficient read queues.
-		 */
-		ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues;
-		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
-		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
-			min(nr_default_queues, nr_io_queues);
-		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
-	} else {
-		/*
-		 * shared read/write queues
-		 * either no write queues were requested, or we don't have
-		 * sufficient queue count to have dedicated default queues.
-		 */
-		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
-			min(nr_read_queues, nr_io_queues);
-		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
-	}
-
-	if (opts->nr_poll_queues && nr_io_queues) {
-		/* map dedicated poll queues only if we have queues left */
-		ctrl->io_queues[HCTX_TYPE_POLL] =
-			min(nr_poll_queues, nr_io_queues);
-	}
-
+	nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues);
 	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
 		ret = nvme_rdma_alloc_queue(ctrl, i,
 				ctrl->ctrl.sqsize + 1);
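The distribution policy moves into nvmf_set_io_queues(), whose body is not part of this excerpt. Under the assumption that it simply generalizes the removed branch over a per-ctrl io_queues[] array (the same logic is deleted from tcp.c below), a sketch:

/* sketch: the removed rdma/tcp logic, parameterized on opts + io_queues[] */
void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
			u32 io_queues[HCTX_MAX_TYPES])
{
	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
		/* separate read/write queues: dedicated default queues
		 * are handed out only once read queues are satisfied */
		io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
		nr_io_queues -= io_queues[HCTX_TYPE_READ];
		io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_write_queues, nr_io_queues);
		nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_io_queues, nr_io_queues);
		nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
	}

	if (opts->nr_poll_queues && nr_io_queues)
		/* map dedicated poll queues only if we have queues left */
		io_queues[HCTX_TYPE_POLL] =
			min(opts->nr_poll_queues, nr_io_queues);
}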
@@ -2138,44 +2103,8 @@ static void nvme_rdma_complete_rq(struct request *rq)
 static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 
-	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
-		/* separate read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-		set->map[HCTX_TYPE_READ].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_READ];
-		set->map[HCTX_TYPE_READ].queue_offset =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-	} else {
-		/* shared read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-		set->map[HCTX_TYPE_READ].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-		set->map[HCTX_TYPE_READ].queue_offset = 0;
-	}
-	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
-	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
-
-	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
-		/* map dedicated poll queues only if we have queues left */
-		set->map[HCTX_TYPE_POLL].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_POLL];
-		set->map[HCTX_TYPE_POLL].queue_offset =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
-			ctrl->io_queues[HCTX_TYPE_READ];
-		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
-	}
-
-	dev_info(ctrl->ctrl.device,
-		"mapped %d/%d/%d default/read/poll queues.\n",
-		ctrl->io_queues[HCTX_TYPE_DEFAULT],
-		ctrl->io_queues[HCTX_TYPE_READ],
-		ctrl->io_queues[HCTX_TYPE_POLL]);
+	nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
 }
 
 static const struct blk_mq_ops nvme_rdma_mq_ops = {
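Likewise, nvmf_map_queues() presumably carries the removed mapping over unchanged, keyed off the io_queues[] counts instead of per-transport state. The essential part is the queue_offset bookkeeping: default queues start at 0, read queues follow them, poll queues follow both. A sketch under that assumption:

/* sketch, assuming the helper generalizes the removed body verbatim */
void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
		     u32 io_queues[HCTX_MAX_TYPES])
{
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (opts->nr_write_queues && io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && io_queues[HCTX_TYPE_POLL]) {
		/* poll queues sit after the default and read ranges */
		set->map[HCTX_TYPE_POLL].nr_queues =
			io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			io_queues[HCTX_TYPE_DEFAULT] +
			io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}
}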
drivers/nvme/host/sysfs.c (new file, 668 lines)
@@ -0,0 +1,668 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Sysfs interface for the NVMe core driver.
 *
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/nvme-auth.h>

#include "nvme.h"
#include "fabrics.h"

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (disk->fops == &nvme_bdev_ops)
		return nvme_get_ns_from_dev(dev)->head;
	else
		return disk->private_data;
}

static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);

	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ids->uuid)) {
		dev_warn_ratelimited(dev,
			"No UUID available providing old NGUID\n");
		return sysfs_emit(buf, "%pU\n", ids->nguid);
	}
	return sysfs_emit(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

static struct attribute *nvme_ns_id_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
	NULL,
};

static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
	return a->mode;
}

static const struct attribute_group nvme_ns_id_attr_group = {
	.attrs		= nvme_ns_id_attrs,
	.is_visible	= nvme_ns_id_attrs_are_visible,
};

const struct attribute_group *nvme_ns_id_attr_groups[] = {
	&nvme_ns_id_attr_group,
	NULL,
};

#define nvme_show_str_function(field) \
static ssize_t field##_show(struct device *dev, \
		struct device_attribute *attr, char *buf) \
{ \
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
	return sysfs_emit(buf, "%.*s\n", \
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field) \
static ssize_t field##_show(struct device *dev, \
		struct device_attribute *attr, char *buf) \
{ \
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
	return sysfs_emit(buf, "%d\n", ctrl->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
nvme_show_int_function(kato);

static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
		return -EBUSY;

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);

	return sysfs_emit(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

static ssize_t nvme_sysfs_show_hostid(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n",
			  opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;

	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
	if (err)
		return err;

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);

static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->fast_io_fail_tmo == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
}

static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int fast_io_fail_tmo, err;

	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
	if (err)
		return -EINVAL;

	if (fast_io_fail_tmo < 0)
		opts->fast_io_fail_tmo = -1;
	else
		opts->fast_io_fail_tmo = fast_io_fail_tmo;
	return count;
}
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);

static ssize_t cntrltype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_CTRL_IO] = "io\n",
		[NVME_CTRL_DISC] = "discovery\n",
		[NVME_CTRL_ADMIN] = "admin\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->cntrltype]);
}
static DEVICE_ATTR_RO(cntrltype);

static ssize_t dctype_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_DCTYPE_NOT_REPORTED] = "none\n",
		[NVME_DCTYPE_DDC] = "ddc\n",
		[NVME_DCTYPE_CDC] = "cdc\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->dctype]);
}
static DEVICE_ATTR_RO(dctype);

#ifdef CONFIG_NVME_AUTH
static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
}

static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
		struct nvme_dhchap_key *key, *host_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_secret);
		opts->dhchap_secret = dhchap_secret;
		host_key = ctrl->host_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->host_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(host_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);

static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_ctrl_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
}

static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_ctrl_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
		struct nvme_dhchap_key *key, *ctrl_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_ctrl_secret);
		opts->dhchap_ctrl_secret = dhchap_secret;
		ctrl_key = ctrl->ctrl_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->ctrl_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(ctrl_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#endif

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	&dev_attr_hostnqn.attr,
	&dev_attr_hostid.attr,
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
	&dev_attr_fast_io_fail_tmo.attr,
	&dev_attr_kato.attr,
	&dev_attr_cntrltype.attr,
	&dev_attr_dctype.attr,
#ifdef CONFIG_NVME_AUTH
	&dev_attr_dhchap_secret.attr,
	&dev_attr_dhchap_ctrl_secret.attr,
#endif
	NULL
};

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
		return 0;
#ifdef CONFIG_NVME_AUTH
	if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
		return 0;
#endif

	return a->mode;
}

const struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};
EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);

const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

#define SUBSYS_ATTR_RO(_name, _mode, _show) \
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

static ssize_t nvme_subsys_show_type(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	switch (subsys->subtype) {
	case NVME_NQN_DISC:
		return sysfs_emit(buf, "discovery\n");
	case NVME_NQN_NVME:
		return sysfs_emit(buf, "nvm\n");
	default:
		return sysfs_emit(buf, "reserved\n");
	}
}
static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);

#define nvme_subsys_show_str_function(field) \
static ssize_t subsys_##field##_show(struct device *dev, \
		struct device_attribute *attr, char *buf) \
{ \
	struct nvme_subsystem *subsys = \
		container_of(dev, struct nvme_subsystem, dev); \
	return sysfs_emit(buf, "%.*s\n", \
		(int)sizeof(subsys->field), subsys->field); \
} \
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
	&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};

static const struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};
drivers/nvme/host/tcp.c
@@ -1802,58 +1802,12 @@ out_free_queues:
 	return ret;
 }
 
-static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
-{
-	unsigned int nr_io_queues;
-
-	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
-	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
-	nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
-
-	return nr_io_queues;
-}
-
-static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
-		unsigned int nr_io_queues)
-{
-	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
-	struct nvmf_ctrl_options *opts = nctrl->opts;
-
-	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
-		/*
-		 * separate read/write queues
-		 * hand out dedicated default queues only after we have
-		 * sufficient read queues.
-		 */
-		ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
-		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
-		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
-			min(opts->nr_write_queues, nr_io_queues);
-		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
-	} else {
-		/*
-		 * shared read/write queues
-		 * either no write queues were requested, or we don't have
-		 * sufficient queue count to have dedicated default queues.
-		 */
-		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
-			min(opts->nr_io_queues, nr_io_queues);
-		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
-	}
-
-	if (opts->nr_poll_queues && nr_io_queues) {
-		/* map dedicated poll queues only if we have queues left */
-		ctrl->io_queues[HCTX_TYPE_POLL] =
-			min(opts->nr_poll_queues, nr_io_queues);
-	}
-}
-
 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 {
 	unsigned int nr_io_queues;
 	int ret;
 
-	nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
+	nr_io_queues = nvmf_nr_io_queues(ctrl->opts);
 	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
 	if (ret)
 		return ret;
@@ -1868,8 +1822,8 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 	dev_info(ctrl->device,
 		"creating %d I/O queues.\n", nr_io_queues);
 
-	nvme_tcp_set_io_queues(ctrl, nr_io_queues);
+	nvmf_set_io_queues(ctrl->opts, nr_io_queues,
+			   to_tcp_ctrl(ctrl)->io_queues);
 	return __nvme_tcp_alloc_io_queues(ctrl);
 }
 
@@ -2449,44 +2403,8 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
 static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 
-	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
-		/* separate read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-		set->map[HCTX_TYPE_READ].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_READ];
-		set->map[HCTX_TYPE_READ].queue_offset =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-	} else {
-		/* shared read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-		set->map[HCTX_TYPE_READ].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-		set->map[HCTX_TYPE_READ].queue_offset = 0;
-	}
-	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
-	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
-
-	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
-		/* map dedicated poll queues only if we have queues left */
-		set->map[HCTX_TYPE_POLL].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_POLL];
-		set->map[HCTX_TYPE_POLL].queue_offset =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
-			ctrl->io_queues[HCTX_TYPE_READ];
-		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
-	}
-
-	dev_info(ctrl->ctrl.device,
-		"mapped %d/%d/%d default/read/poll queues.\n",
-		ctrl->io_queues[HCTX_TYPE_DEFAULT],
-		ctrl->io_queues[HCTX_TYPE_READ],
-		ctrl->io_queues[HCTX_TYPE_POLL]);
+	nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
 }
 
 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
drivers/nvme/target/fabrics-cmd-auth.c
@@ -295,13 +295,11 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 			status = 0;
 		}
 		goto done_kfree;
-		break;
 	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
 		req->sq->authenticated = true;
 		pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
			 __func__, ctrl->cntlid, req->sq->qid);
 		goto done_kfree;
-		break;
 	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
 		status = nvmet_auth_failure2(d);
 		if (status) {
@@ -312,7 +310,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 			status = 0;
 		}
 		goto done_kfree;
-		break;
 	default:
 		req->sq->dhchap_status =
 			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
@@ -320,7 +317,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 			NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
 		req->sq->authenticated = false;
 		goto done_kfree;
-		break;
 	}
 done_failure1:
 	req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
@@ -483,15 +479,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
 			status = NVME_SC_INTERNAL;
 			break;
 		}
-		if (status) {
-			req->sq->dhchap_status = status;
-			nvmet_auth_failure1(req, d, al);
-			pr_warn("ctrl %d qid %d: challenge status (%x)\n",
-				ctrl->cntlid, req->sq->qid,
-				req->sq->dhchap_status);
-			status = 0;
-			break;
-		}
 		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
 		break;
 	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
drivers/nvme/target/fcloop.c
@@ -645,8 +645,6 @@ fcloop_fcp_recv_work(struct work_struct *work)
 	}
 	if (ret)
 		fcloop_call_host_done(fcpreq, tfcp_req, ret);
-
-	return;
 }
 
 static void
@@ -1168,7 +1166,8 @@ __wait_localport_unreg(struct fcloop_lport *lport)
 
 	ret = nvme_fc_unregister_localport(lport->localport);
 
-	wait_for_completion(&lport->unreg_done);
+	if (!ret)
+		wait_for_completion(&lport->unreg_done);
 
 	kfree(lport);
 
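The second fcloop hunk fixes a potential hang: the completion is only ever signalled when the unregister call was accepted, so waiting unconditionally after a failure blocks forever. A tiny userspace model of the pattern the fix enforces (all names hypothetical, the busy-wait stands in for wait_for_completion()):

#include <stdbool.h>
#include <stdio.h>

static bool unreg_done;

static int unregister_port(bool fail)
{
	if (fail)
		return -22;	/* error path: completion never fires */
	unreg_done = true;	/* models complete(&lport->unreg_done) */
	return 0;
}

int main(void)
{
	int ret = unregister_port(true);

	if (!ret)		/* the fix: skip the wait when unregister failed */
		while (!unreg_done)
			;	/* models wait_for_completion() */
	printf("teardown done, ret=%d\n", ret);
	return 0;
}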
drivers/nvme/target/nvmet.h
@@ -109,8 +109,8 @@ struct nvmet_sq {
 	u32			sqhd;
 	bool			sqhd_disabled;
 #ifdef CONFIG_NVME_TARGET_AUTH
-	struct delayed_work	auth_expired_work;
 	bool			authenticated;
+	struct delayed_work	auth_expired_work;
 	u16			dhchap_tid;
 	u16			dhchap_status;
 	int			dhchap_step;
include/linux/nvme-fc-driver.h
@@ -185,7 +185,6 @@ enum nvmefc_fcp_datadir {
  * @first_sgl: memory for 1st scatter/gather list segment for payload data
  * @sg_cnt: number of elements in the scatter/gather list
  * @io_dir: direction of the FCP request (see NVMEFC_FCP_xxx)
- * @sqid: The nvme SQID the command is being issued on
  * @done: The callback routine the LLDD is to invoke upon completion of
  *	the FCP operation. req argument is the pointer to the original
  *	FCP IO operation.
@@ -194,12 +193,13 @@ enum nvmefc_fcp_datadir {
  *	while processing the operation. The length of the buffer
  *	corresponds to the fcprqst_priv_sz value specified in the
  *	nvme_fc_port_template supplied by the LLDD.
+ * @sqid: The nvme SQID the command is being issued on
  *
  * Values set by the LLDD indicating completion status of the FCP operation.
  * Must be set prior to calling the done() callback.
+ * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
  * @transferred_length: amount of payload data, in bytes, that were
  *	transferred. Should equal payload_length on success.
- * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
  * @status: Completion status of the FCP operation. must be 0 upon success,
  *	negative errno value upon failure (ex: -EIO). Note: this is
  *	NOT a reflection of the NVME CQE completion status. Only the
@@ -219,14 +219,14 @@ struct nvmefc_fcp_req {
 	int			sg_cnt;
 	enum nvmefc_fcp_datadir	io_dir;
 
-	__le16			sqid;
-
 	void (*done)(struct nvmefc_fcp_req *req);
 
 	void			*private;
 
-	u32			transferred_length;
+	__le16			sqid;
+
 	u16			rcv_rsplen;
+	u32			transferred_length;
 	u32			status;
 } __aligned(sizeof(u64));	/* alignment for other things alloc'd with */