2019-02-18 18:36:11 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2016-06-22 00:04:20 +08:00
|
|
|
/*
|
|
|
|
* NVMe admin command implementation.
|
|
|
|
* Copyright (c) 2015-2016 HGST, a Western Digital Company.
|
|
|
|
*/
|
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
#include <linux/module.h>
|
2017-02-04 08:27:20 +08:00
|
|
|
#include <linux/rculist.h>
|
2020-03-25 23:48:42 +08:00
|
|
|
#include <linux/part_stat.h>
|
2017-02-04 08:27:20 +08:00
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
#include <generated/utsrelease.h>
|
2016-09-02 03:45:03 +08:00
|
|
|
#include <asm/unaligned.h>
|
2016-06-22 00:04:20 +08:00
|
|
|
#include "nvmet.h"
|
|
|
|
|
|
|
|
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
|
|
|
|
{
|
|
|
|
u32 len = le16_to_cpu(cmd->get_log_page.numdu);
|
|
|
|
|
|
|
|
len <<= 16;
|
|
|
|
len += le16_to_cpu(cmd->get_log_page.numdl);
|
|
|
|
/* NUMD is a 0's based value */
|
|
|
|
len += 1;
|
|
|
|
len *= sizeof(u32);
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
2020-01-08 00:47:24 +08:00
|
|
|
/*
 * Return the data transfer length implied by a Get/Set Features
 * command; of the features we implement, only Host Identifier
 * carries a data buffer.
 */
static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	u8 fid = cdw10 & 0xff;

	if (fid == NVME_FEAT_HOST_ID)
		return sizeof(req->sq->ctrl->hostid);

	return 0;
}
|
|
|
|
|
2019-04-10 00:03:59 +08:00
|
|
|
u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
|
|
|
|
{
|
|
|
|
return le64_to_cpu(cmd->get_log_page.lpo);
|
|
|
|
}
|
|
|
|
|
2018-05-22 17:10:03 +08:00
|
|
|
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
|
|
|
|
{
|
2019-10-24 00:35:44 +08:00
|
|
|
nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
|
2018-05-22 17:10:03 +08:00
|
|
|
}
|
|
|
|
|
2018-12-13 07:11:47 +08:00
|
|
|
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
|
|
|
unsigned long flags;
|
|
|
|
off_t offset = 0;
|
|
|
|
u64 slot;
|
|
|
|
u64 i;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ctrl->error_lock, flags);
|
|
|
|
slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
|
|
|
|
|
|
|
|
for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
|
2019-09-12 13:29:39 +08:00
|
|
|
if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
|
|
|
|
sizeof(struct nvme_error_slot)))
|
2018-12-13 07:11:47 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
if (slot == 0)
|
|
|
|
slot = NVMET_ERROR_LOG_SLOTS - 1;
|
|
|
|
else
|
|
|
|
slot--;
|
|
|
|
offset += sizeof(struct nvme_error_slot);
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&ctrl->error_lock, flags);
|
2019-09-12 13:29:39 +08:00
|
|
|
nvmet_req_complete(req, 0);
|
2018-12-13 07:11:47 +08:00
|
|
|
}
|
|
|
|
|
2016-09-02 03:45:03 +08:00
|
|
|
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
|
|
|
|
struct nvme_smart_log *slog)
|
|
|
|
{
|
|
|
|
u64 host_reads, host_writes, data_units_read, data_units_written;
|
2021-02-10 13:47:54 +08:00
|
|
|
u16 status;
|
2016-09-02 03:45:03 +08:00
|
|
|
|
2021-02-10 13:47:54 +08:00
|
|
|
status = nvmet_req_find_ns(req);
|
|
|
|
if (status)
|
|
|
|
return status;
|
2016-09-02 03:45:03 +08:00
|
|
|
|
2018-05-23 12:34:39 +08:00
|
|
|
/* we don't have the right data for file backed ns */
|
2021-01-14 09:33:52 +08:00
|
|
|
if (!req->ns->bdev)
|
|
|
|
return NVME_SC_SUCCESS;
|
2018-05-23 12:34:39 +08:00
|
|
|
|
2021-01-14 09:33:52 +08:00
|
|
|
host_reads = part_stat_read(req->ns->bdev, ios[READ]);
|
2020-11-24 16:36:54 +08:00
|
|
|
data_units_read =
|
2021-01-14 09:33:52 +08:00
|
|
|
DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
|
|
|
|
host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
|
2020-11-24 16:36:54 +08:00
|
|
|
data_units_written =
|
2021-01-14 09:33:52 +08:00
|
|
|
DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
|
2016-09-02 03:45:03 +08:00
|
|
|
|
|
|
|
put_unaligned_le64(host_reads, &slog->host_reads[0]);
|
|
|
|
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
|
|
|
|
put_unaligned_le64(host_writes, &slog->host_writes[0]);
|
|
|
|
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
|
2017-11-08 18:00:30 +08:00
|
|
|
|
|
|
|
return NVME_SC_SUCCESS;
|
2016-09-02 03:45:03 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
|
|
|
|
struct nvme_smart_log *slog)
|
|
|
|
{
|
|
|
|
u64 host_reads = 0, host_writes = 0;
|
|
|
|
u64 data_units_read = 0, data_units_written = 0;
|
|
|
|
struct nvmet_ns *ns;
|
|
|
|
struct nvmet_ctrl *ctrl;
|
nvmet: use xarray for ctrl ns storing
This patch replaces the ctrl->namespaces tracking from linked list to
xarray and improves the performance when accessing one namespce :-
XArray vs Default:-
IOPS and BW (more the better) increase BW (~1.8%):-
---------------------------------------------------
XArray :-
read: IOPS=160k, BW=626MiB/s (656MB/s)(18.3GiB/30001msec)
read: IOPS=160k, BW=626MiB/s (656MB/s)(18.3GiB/30001msec)
read: IOPS=162k, BW=631MiB/s (662MB/s)(18.5GiB/30001msec)
Default:-
read: IOPS=156k, BW=609MiB/s (639MB/s)(17.8GiB/30001msec)
read: IOPS=157k, BW=613MiB/s (643MB/s)(17.0GiB/30001msec)
read: IOPS=160k, BW=626MiB/s (656MB/s)(18.3GiB/30001msec)
Submission latency (less the better) decrease (~8.3%):-
-------------------------------------------------------
XArray:-
slat (usec): min=7, max=8386, avg=11.19, stdev=5.96
slat (usec): min=7, max=441, avg=11.09, stdev=4.48
slat (usec): min=7, max=1088, avg=11.21, stdev=4.54
Default :-
slat (usec): min=8, max=2826.5k, avg=23.96, stdev=3911.50
slat (usec): min=8, max=503, avg=12.52, stdev=5.07
slat (usec): min=8, max=2384, avg=12.50, stdev=5.28
CPU Usage (less the better) decrease (~5.2%):-
----------------------------------------------
XArray:-
cpu : usr=1.84%, sys=18.61%, ctx=949471, majf=0, minf=250
cpu : usr=1.83%, sys=18.41%, ctx=950262, majf=0, minf=237
cpu : usr=1.82%, sys=18.82%, ctx=957224, majf=0, minf=234
Default:-
cpu : usr=1.70%, sys=19.21%, ctx=858196, majf=0, minf=251
cpu : usr=1.82%, sys=19.98%, ctx=929720, majf=0, minf=227
cpu : usr=1.83%, sys=20.33%, ctx=947208, majf=0, minf=235.
Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
2020-07-20 11:32:02 +08:00
|
|
|
unsigned long idx;
|
2016-09-02 03:45:03 +08:00
|
|
|
|
|
|
|
ctrl = req->sq->ctrl;
|
nvmet: use xarray for ctrl ns storing
This patch replaces the ctrl->namespaces tracking from linked list to
xarray and improves the performance when accessing one namespce :-
XArray vs Default:-
IOPS and BW (more the better) increase BW (~1.8%):-
---------------------------------------------------
XArray :-
read: IOPS=160k, BW=626MiB/s (656MB/s)(18.3GiB/30001msec)
read: IOPS=160k, BW=626MiB/s (656MB/s)(18.3GiB/30001msec)
read: IOPS=162k, BW=631MiB/s (662MB/s)(18.5GiB/30001msec)
Default:-
read: IOPS=156k, BW=609MiB/s (639MB/s)(17.8GiB/30001msec)
read: IOPS=157k, BW=613MiB/s (643MB/s)(17.0GiB/30001msec)
read: IOPS=160k, BW=626MiB/s (656MB/s)(18.3GiB/30001msec)
Submission latency (less the better) decrease (~8.3%):-
-------------------------------------------------------
XArray:-
slat (usec): min=7, max=8386, avg=11.19, stdev=5.96
slat (usec): min=7, max=441, avg=11.09, stdev=4.48
slat (usec): min=7, max=1088, avg=11.21, stdev=4.54
Default :-
slat (usec): min=8, max=2826.5k, avg=23.96, stdev=3911.50
slat (usec): min=8, max=503, avg=12.52, stdev=5.07
slat (usec): min=8, max=2384, avg=12.50, stdev=5.28
CPU Usage (less the better) decrease (~5.2%):-
----------------------------------------------
XArray:-
cpu : usr=1.84%, sys=18.61%, ctx=949471, majf=0, minf=250
cpu : usr=1.83%, sys=18.41%, ctx=950262, majf=0, minf=237
cpu : usr=1.82%, sys=18.82%, ctx=957224, majf=0, minf=234
Default:-
cpu : usr=1.70%, sys=19.21%, ctx=858196, majf=0, minf=251
cpu : usr=1.82%, sys=19.98%, ctx=929720, majf=0, minf=227
cpu : usr=1.83%, sys=20.33%, ctx=947208, majf=0, minf=235.
Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
2020-07-20 11:32:02 +08:00
|
|
|
xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
|
2018-05-23 12:34:39 +08:00
|
|
|
/* we don't have the right data for file backed ns */
|
|
|
|
if (!ns->bdev)
|
|
|
|
continue;
|
2020-11-24 16:36:54 +08:00
|
|
|
host_reads += part_stat_read(ns->bdev, ios[READ]);
|
2019-08-08 10:22:36 +08:00
|
|
|
data_units_read += DIV_ROUND_UP(
|
2020-11-24 16:36:54 +08:00
|
|
|
part_stat_read(ns->bdev, sectors[READ]), 1000);
|
|
|
|
host_writes += part_stat_read(ns->bdev, ios[WRITE]);
|
2019-08-08 10:22:36 +08:00
|
|
|
data_units_written += DIV_ROUND_UP(
|
2020-11-24 16:36:54 +08:00
|
|
|
part_stat_read(ns->bdev, sectors[WRITE]), 1000);
|
2016-09-02 03:45:03 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
put_unaligned_le64(host_reads, &slog->host_reads[0]);
|
|
|
|
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
|
|
|
|
put_unaligned_le64(host_writes, &slog->host_writes[0]);
|
|
|
|
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
|
|
|
|
|
2017-11-08 18:00:30 +08:00
|
|
|
return NVME_SC_SUCCESS;
|
2016-09-02 03:45:03 +08:00
|
|
|
}
|
|
|
|
|
2018-05-22 17:10:03 +08:00
|
|
|
static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
|
2016-06-22 00:04:20 +08:00
|
|
|
{
|
2018-05-22 17:10:03 +08:00
|
|
|
struct nvme_smart_log *log;
|
|
|
|
u16 status = NVME_SC_INTERNAL;
|
2018-12-13 07:11:48 +08:00
|
|
|
unsigned long flags;
|
2016-06-22 00:04:20 +08:00
|
|
|
|
2019-10-24 00:35:44 +08:00
|
|
|
if (req->transfer_len != sizeof(*log))
|
2016-06-22 00:04:20 +08:00
|
|
|
goto out;
|
|
|
|
|
2018-05-22 17:10:03 +08:00
|
|
|
log = kzalloc(sizeof(*log), GFP_KERNEL);
|
|
|
|
if (!log)
|
|
|
|
goto out;
|
2016-06-22 00:04:20 +08:00
|
|
|
|
2018-05-22 17:10:03 +08:00
|
|
|
if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
|
|
|
|
status = nvmet_get_smart_log_all(req, log);
|
|
|
|
else
|
|
|
|
status = nvmet_get_smart_log_nsid(req, log);
|
|
|
|
if (status)
|
2018-06-11 15:20:24 +08:00
|
|
|
goto out_free_log;
|
2016-06-22 00:04:20 +08:00
|
|
|
|
2018-12-13 07:11:48 +08:00
|
|
|
spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
|
|
|
|
put_unaligned_le64(req->sq->ctrl->err_counter,
|
|
|
|
&log->num_err_log_entries);
|
|
|
|
spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
|
|
|
|
|
2018-05-22 17:10:03 +08:00
|
|
|
status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
|
2018-06-11 15:20:24 +08:00
|
|
|
out_free_log:
|
|
|
|
kfree(log);
|
2016-06-22 00:04:20 +08:00
|
|
|
out:
|
|
|
|
nvmet_req_complete(req, status);
|
|
|
|
}
|
|
|
|
|
2021-06-10 09:32:51 +08:00
|
|
|
static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
|
2018-06-12 01:40:07 +08:00
|
|
|
{
|
|
|
|
log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
|
|
|
|
log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
|
|
|
|
log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
|
|
|
|
log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
|
|
|
|
log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
|
|
|
|
log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
|
|
|
|
log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);
|
|
|
|
|
|
|
|
log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
|
|
|
|
log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
|
|
|
|
log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
|
|
|
|
log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
|
|
|
|
log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
|
2021-06-10 09:32:51 +08:00
|
|
|
}
|
2018-06-12 01:40:07 +08:00
|
|
|
|
2021-06-10 09:32:52 +08:00
|
|
|
static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
|
|
|
|
{
|
|
|
|
log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0);
|
|
|
|
log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0);
|
|
|
|
log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0);
|
|
|
|
}
|
|
|
|
|
2021-06-10 09:32:51 +08:00
|
|
|
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
struct nvme_effects_log *log;
|
|
|
|
u16 status = NVME_SC_SUCCESS;
|
2018-06-12 01:40:07 +08:00
|
|
|
|
2021-06-10 09:32:51 +08:00
|
|
|
log = kzalloc(sizeof(*log), GFP_KERNEL);
|
|
|
|
if (!log) {
|
|
|
|
status = NVME_SC_INTERNAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (req->cmd->get_log_page.csi) {
|
|
|
|
case NVME_CSI_NVM:
|
|
|
|
nvmet_get_cmd_effects_nvm(log);
|
|
|
|
break;
|
2021-06-10 09:32:52 +08:00
|
|
|
case NVME_CSI_ZNS:
|
|
|
|
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
|
|
|
|
status = NVME_SC_INVALID_IO_CMD_SET;
|
|
|
|
goto free;
|
|
|
|
}
|
|
|
|
nvmet_get_cmd_effects_nvm(log);
|
|
|
|
nvmet_get_cmd_effects_zns(log);
|
|
|
|
break;
|
2021-06-10 09:32:51 +08:00
|
|
|
default:
|
|
|
|
status = NVME_SC_INVALID_LOG_PAGE;
|
|
|
|
goto free;
|
|
|
|
}
|
|
|
|
|
|
|
|
status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
|
|
|
|
free:
|
2018-06-12 01:40:07 +08:00
|
|
|
kfree(log);
|
|
|
|
out:
|
|
|
|
nvmet_req_complete(req, status);
|
|
|
|
}
|
|
|
|
|
2018-05-25 23:16:09 +08:00
|
|
|
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
|
|
|
u16 status = NVME_SC_INTERNAL;
|
|
|
|
size_t len;
|
|
|
|
|
2019-10-24 00:35:44 +08:00
|
|
|
if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
|
2018-05-25 23:16:09 +08:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
mutex_lock(&ctrl->lock);
|
|
|
|
if (ctrl->nr_changed_ns == U32_MAX)
|
|
|
|
len = sizeof(__le32);
|
|
|
|
else
|
|
|
|
len = ctrl->nr_changed_ns * sizeof(__le32);
|
|
|
|
status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
|
|
|
|
if (!status)
|
2019-10-24 00:35:44 +08:00
|
|
|
status = nvmet_zero_sgl(req, len, req->transfer_len - len);
|
2018-05-25 23:16:09 +08:00
|
|
|
ctrl->nr_changed_ns = 0;
|
2018-11-13 05:56:34 +08:00
|
|
|
nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
|
2018-05-25 23:16:09 +08:00
|
|
|
mutex_unlock(&ctrl->lock);
|
|
|
|
out:
|
|
|
|
nvmet_req_complete(req, status);
|
|
|
|
}
|
|
|
|
|
2018-07-19 22:35:20 +08:00
|
|
|
/*
 * Fill one ANA group descriptor for @grpid into @desc and return its
 * size in bytes.  When the host set the RGO flag (Return Groups
 * Only), the NSID list is omitted and nnsids is zero.
 */
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return struct_size(desc, nsids, count);
}
|
|
|
|
|
|
|
|
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
struct nvme_ana_rsp_hdr hdr = { 0, };
|
|
|
|
struct nvme_ana_group_desc *desc;
|
|
|
|
size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
|
|
|
|
size_t len;
|
|
|
|
u32 grpid;
|
|
|
|
u16 ngrps = 0;
|
|
|
|
u16 status;
|
|
|
|
|
|
|
|
status = NVME_SC_INTERNAL;
|
2021-10-17 17:56:50 +08:00
|
|
|
desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
|
|
|
|
GFP_KERNEL);
|
2018-07-19 22:35:20 +08:00
|
|
|
if (!desc)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
down_read(&nvmet_ana_sem);
|
|
|
|
for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
|
|
|
|
if (!nvmet_ana_group_enabled[grpid])
|
|
|
|
continue;
|
|
|
|
len = nvmet_format_ana_group(req, grpid, desc);
|
|
|
|
status = nvmet_copy_to_sgl(req, offset, desc, len);
|
|
|
|
if (status)
|
|
|
|
break;
|
|
|
|
offset += len;
|
|
|
|
ngrps++;
|
|
|
|
}
|
2018-07-16 18:58:33 +08:00
|
|
|
for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
|
|
|
|
if (nvmet_ana_group_enabled[grpid])
|
|
|
|
ngrps++;
|
|
|
|
}
|
2018-07-19 22:35:20 +08:00
|
|
|
|
|
|
|
hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
|
|
|
|
hdr.ngrps = cpu_to_le16(ngrps);
|
2018-11-13 05:56:34 +08:00
|
|
|
nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
|
2018-07-19 22:35:20 +08:00
|
|
|
up_read(&nvmet_ana_sem);
|
|
|
|
|
|
|
|
kfree(desc);
|
|
|
|
|
|
|
|
/* copy the header last once we know the number of groups */
|
|
|
|
status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
|
|
|
|
out:
|
|
|
|
nvmet_req_complete(req, status);
|
|
|
|
}
|
|
|
|
|
2019-10-24 00:35:41 +08:00
|
|
|
static void nvmet_execute_get_log_page(struct nvmet_req *req)
|
|
|
|
{
|
2020-05-19 22:05:59 +08:00
|
|
|
if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
|
2019-10-24 00:35:44 +08:00
|
|
|
return;
|
|
|
|
|
2019-10-24 00:35:41 +08:00
|
|
|
switch (req->cmd->get_log_page.lid) {
|
|
|
|
case NVME_LOG_ERROR:
|
|
|
|
return nvmet_execute_get_log_page_error(req);
|
|
|
|
case NVME_LOG_SMART:
|
|
|
|
return nvmet_execute_get_log_page_smart(req);
|
|
|
|
case NVME_LOG_FW_SLOT:
|
|
|
|
/*
|
|
|
|
* We only support a single firmware slot which always is
|
|
|
|
* active, so we can zero out the whole firmware slot log and
|
|
|
|
* still claim to fully implement this mandatory log page.
|
|
|
|
*/
|
|
|
|
return nvmet_execute_get_log_page_noop(req);
|
|
|
|
case NVME_LOG_CHANGED_NS:
|
|
|
|
return nvmet_execute_get_log_changed_ns(req);
|
|
|
|
case NVME_LOG_CMD_EFFECTS:
|
|
|
|
return nvmet_execute_get_log_cmd_effects_ns(req);
|
|
|
|
case NVME_LOG_ANA:
|
|
|
|
return nvmet_execute_get_log_page_ana(req);
|
|
|
|
}
|
2021-04-29 12:25:58 +08:00
|
|
|
pr_debug("unhandled lid %d on qid %d\n",
|
2019-10-24 00:35:41 +08:00
|
|
|
req->cmd->get_log_page.lid, req->sq->qid);
|
|
|
|
req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
|
|
|
|
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
|
|
|
|
}
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
2021-02-18 01:19:40 +08:00
|
|
|
struct nvmet_subsys *subsys = ctrl->subsys;
|
2016-06-22 00:04:20 +08:00
|
|
|
struct nvme_id_ctrl *id;
|
2020-05-19 22:06:01 +08:00
|
|
|
u32 cmd_capsule_size;
|
2016-06-22 00:04:20 +08:00
|
|
|
u16 status = 0;
|
|
|
|
|
2021-06-07 17:23:22 +08:00
|
|
|
if (!subsys->subsys_discovered) {
|
|
|
|
mutex_lock(&subsys->lock);
|
|
|
|
subsys->subsys_discovered = true;
|
|
|
|
mutex_unlock(&subsys->lock);
|
|
|
|
}
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
id = kzalloc(sizeof(*id), GFP_KERNEL);
|
|
|
|
if (!id) {
|
|
|
|
status = NVME_SC_INTERNAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* XXX: figure out how to assign real vendors IDs. */
|
|
|
|
id->vid = 0;
|
|
|
|
id->ssvid = 0;
|
|
|
|
|
2021-06-07 17:23:21 +08:00
|
|
|
memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
|
2021-02-18 01:19:40 +08:00
|
|
|
memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
|
|
|
|
strlen(subsys->model_number), ' ');
|
2017-08-15 04:12:39 +08:00
|
|
|
memcpy_and_pad(id->fr, sizeof(id->fr),
|
|
|
|
UTS_RELEASE, strlen(UTS_RELEASE), ' ');
|
2016-06-22 00:04:20 +08:00
|
|
|
|
|
|
|
id->rab = 6;
|
|
|
|
|
2021-09-22 14:35:22 +08:00
|
|
|
if (nvmet_is_disc_subsys(ctrl->subsys))
|
|
|
|
id->cntrltype = NVME_CTRL_DISC;
|
|
|
|
else
|
|
|
|
id->cntrltype = NVME_CTRL_IO;
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
/*
|
|
|
|
* XXX: figure out how we can assign a IEEE OUI, but until then
|
|
|
|
* the safest is to leave it as zeroes.
|
|
|
|
*/
|
|
|
|
|
2018-07-19 22:35:20 +08:00
|
|
|
/* we support multiple ports, multiples hosts and ANA: */
|
2021-09-23 18:17:44 +08:00
|
|
|
id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
|
|
|
|
NVME_CTRL_CMIC_ANA;
|
2016-06-22 00:04:20 +08:00
|
|
|
|
2020-03-08 18:55:03 +08:00
|
|
|
/* Limit MDTS according to transport capability */
|
|
|
|
if (ctrl->ops->get_mdts)
|
|
|
|
id->mdts = ctrl->ops->get_mdts(ctrl);
|
|
|
|
else
|
|
|
|
id->mdts = 0;
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
id->cntlid = cpu_to_le16(ctrl->cntlid);
|
|
|
|
id->ver = cpu_to_le32(ctrl->subsys->ver);
|
|
|
|
|
|
|
|
/* XXX: figure out what to do about RTD3R/RTD3 */
|
2018-05-30 21:04:47 +08:00
|
|
|
id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
|
2018-11-03 01:28:13 +08:00
|
|
|
id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
|
|
|
|
NVME_CTRL_ATTR_TBKAS);
|
2016-06-22 00:04:20 +08:00
|
|
|
|
|
|
|
id->oacs = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We don't really have a practical limit on the number of abort
|
|
|
|
* comands. But we don't do anything useful for abort either, so
|
|
|
|
* no point in allowing more abort commands than the spec requires.
|
|
|
|
*/
|
|
|
|
id->acl = 3;
|
|
|
|
|
|
|
|
id->aerl = NVMET_ASYNC_EVENTS - 1;
|
|
|
|
|
|
|
|
/* first slot is read-only, only one slot supported */
|
|
|
|
id->frmw = (1 << 0) | (1 << 1);
|
2018-06-12 01:40:07 +08:00
|
|
|
id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
|
2016-06-22 00:04:20 +08:00
|
|
|
id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
|
|
|
|
id->npss = 0;
|
|
|
|
|
|
|
|
/* We support keep-alive timeout in granularity of seconds */
|
|
|
|
id->kas = cpu_to_le16(NVMET_KAS);
|
|
|
|
|
|
|
|
id->sqes = (0x6 << 4) | 0x6;
|
|
|
|
id->cqes = (0x4 << 4) | 0x4;
|
|
|
|
|
|
|
|
/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
|
|
|
|
id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
|
|
|
|
|
2021-06-21 11:01:09 +08:00
|
|
|
id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
|
2018-05-14 01:00:13 +08:00
|
|
|
id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
|
2016-12-01 04:29:02 +08:00
|
|
|
id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
|
|
|
|
NVME_CTRL_ONCS_WRITE_ZEROES);
|
2016-06-22 00:04:20 +08:00
|
|
|
|
|
|
|
/* XXX: don't report vwc if the underlying device is write through */
|
|
|
|
id->vwc = NVME_CTRL_VWC_PRESENT;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We can't support atomic writes bigger than a LBA without support
|
|
|
|
* from the backend device.
|
|
|
|
*/
|
|
|
|
id->awun = 0;
|
|
|
|
id->awupf = 0;
|
|
|
|
|
|
|
|
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
|
2020-06-02 21:15:46 +08:00
|
|
|
if (ctrl->ops->flags & NVMF_KEYED_SGLS)
|
2016-06-22 00:04:20 +08:00
|
|
|
id->sgls |= cpu_to_le32(1 << 2);
|
nvmet-rdma: support max(16KB, PAGE_SIZE) inline data
The patch enables inline data sizes using up to 4 recv sges, and capping
the size at 16KB or at least 1 page size. So on a 4K page system, up to
16KB is supported, and for a 64K page system 1 page of 64KB is supported.
We avoid > 0 order page allocations for the inline buffers by using
multiple recv sges, one for each page. If the device cannot support
the configured inline data size due to lack of enough recv sges, then
log a warning and reduce the inline size.
Add a new configfs port attribute, called param_inline_data_size,
to allow configuring the size of inline data for a given nvmf port.
The maximum size allowed is still enforced by nvmet-rdma with
NVMET_RDMA_MAX_INLINE_DATA_SIZE, which is now max(16KB, PAGE_SIZE).
And the default size, if not specified via configfs, is still PAGE_SIZE.
This preserves the existing behavior, but allows larger inline sizes
for small page systems. If the configured inline data size exceeds
NVMET_RDMA_MAX_INLINE_DATA_SIZE, a warning is logged and the size is
reduced. If param_inline_data_size is set to 0, then inline data is
disabled for that nvmf port.
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
2018-06-20 22:15:10 +08:00
|
|
|
if (req->port->inline_data_size)
|
2016-06-22 00:04:20 +08:00
|
|
|
id->sgls |= cpu_to_le32(1 << 20);
|
|
|
|
|
2022-08-19 05:00:52 +08:00
|
|
|
strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
|
2016-06-22 00:04:20 +08:00
|
|
|
|
2020-05-19 22:06:01 +08:00
|
|
|
/*
|
|
|
|
* Max command capsule size is sqe + in-capsule data size.
|
|
|
|
* Disable in-capsule data for Metadata capable controllers.
|
|
|
|
*/
|
|
|
|
cmd_capsule_size = sizeof(struct nvme_command);
|
|
|
|
if (!ctrl->pi_support)
|
|
|
|
cmd_capsule_size += req->port->inline_data_size;
|
|
|
|
id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
/* Max response capsule size is cqe */
|
|
|
|
id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
|
|
|
|
|
|
|
|
id->msdbd = ctrl->ops->msdbd;
|
|
|
|
|
2018-07-19 22:35:20 +08:00
|
|
|
id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
|
|
|
|
id->anatt = 10; /* random value */
|
|
|
|
id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
|
|
|
|
id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
/*
|
|
|
|
* Meh, we don't really support any power state. Fake up the same
|
|
|
|
* values that qemu does.
|
|
|
|
*/
|
|
|
|
id->psd[0].max_power = cpu_to_le16(0x9c4);
|
|
|
|
id->psd[0].entry_lat = cpu_to_le32(0x10);
|
|
|
|
id->psd[0].exit_lat = cpu_to_le32(0x4);
|
|
|
|
|
2018-08-08 14:01:07 +08:00
|
|
|
id->nwpc = 1 << 0; /* write protect and no write protect */
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
|
|
|
|
|
|
|
|
kfree(id);
|
|
|
|
out:
|
|
|
|
nvmet_req_complete(req, status);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nvmet_execute_identify_ns(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
struct nvme_id_ns *id;
|
2021-02-10 13:47:54 +08:00
|
|
|
u16 status;
|
2016-06-22 00:04:20 +08:00
|
|
|
|
2018-06-01 00:23:48 +08:00
|
|
|
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
|
2018-12-13 07:11:46 +08:00
|
|
|
req->error_loc = offsetof(struct nvme_identify, nsid);
|
2016-06-22 00:04:20 +08:00
|
|
|
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
id = kzalloc(sizeof(*id), GFP_KERNEL);
|
|
|
|
if (!id) {
|
|
|
|
status = NVME_SC_INTERNAL;
|
2018-06-01 00:23:48 +08:00
|
|
|
goto out;
|
2016-06-22 00:04:20 +08:00
|
|
|
}
|
|
|
|
|
2018-06-01 00:23:48 +08:00
|
|
|
/* return an all zeroed buffer if we can't find an active namespace */
|
2021-02-10 13:47:54 +08:00
|
|
|
status = nvmet_req_find_ns(req);
|
|
|
|
if (status) {
|
2021-02-10 13:47:52 +08:00
|
|
|
status = 0;
|
2018-06-01 00:23:48 +08:00
|
|
|
goto done;
|
2021-01-14 09:33:51 +08:00
|
|
|
}
|
2018-06-01 00:23:48 +08:00
|
|
|
|
2022-03-15 15:13:04 +08:00
|
|
|
if (nvmet_ns_revalidate(req->ns)) {
|
|
|
|
mutex_lock(&req->ns->subsys->lock);
|
|
|
|
nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
|
|
|
|
mutex_unlock(&req->ns->subsys->lock);
|
|
|
|
}
|
2020-04-20 07:48:50 +08:00
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
/*
|
2017-11-07 20:10:22 +08:00
|
|
|
* nuse = ncap = nsze isn't always true, but we have no way to find
|
2016-06-22 00:04:20 +08:00
|
|
|
* that out from the underlying device.
|
|
|
|
*/
|
2021-01-14 09:33:54 +08:00
|
|
|
id->ncap = id->nsze =
|
|
|
|
cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
|
|
|
|
switch (req->port->ana_state[req->ns->anagrpid]) {
|
2018-07-19 22:35:20 +08:00
|
|
|
case NVME_ANA_INACCESSIBLE:
|
|
|
|
case NVME_ANA_PERSISTENT_LOSS:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
id->nuse = id->nsze;
|
|
|
|
break;
|
2021-02-25 09:56:42 +08:00
|
|
|
}
|
2016-06-22 00:04:20 +08:00
|
|
|
|
2021-01-14 09:33:54 +08:00
|
|
|
if (req->ns->bdev)
|
|
|
|
nvmet_bdev_set_limits(req->ns->bdev, id);
|
2019-06-29 00:53:30 +08:00
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
/*
|
|
|
|
* We just provide a single LBA format that matches what the
|
|
|
|
* underlying device reports.
|
|
|
|
*/
|
|
|
|
id->nlbaf = 0;
|
|
|
|
id->flbas = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Our namespace might always be shared. Not just with other
|
|
|
|
* controllers, but also with any other user of the block device.
|
|
|
|
*/
|
2021-09-23 18:17:43 +08:00
|
|
|
id->nmic = NVME_NS_NMIC_SHARED;
|
2021-01-14 09:33:54 +08:00
|
|
|
id->anagrpid = cpu_to_le32(req->ns->anagrpid);
|
2016-06-22 00:04:20 +08:00
|
|
|
|
2021-01-14 09:33:54 +08:00
|
|
|
memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
|
2016-06-22 00:04:20 +08:00
|
|
|
|
2021-01-14 09:33:54 +08:00
|
|
|
id->lbaf[0].ds = req->ns->blksize_shift;
|
2016-06-22 00:04:20 +08:00
|
|
|
|
2021-02-10 13:47:55 +08:00
|
|
|
if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
|
2020-05-19 22:06:01 +08:00
|
|
|
id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
|
|
|
|
NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
|
|
|
|
NVME_NS_DPC_PI_TYPE3;
|
|
|
|
id->mc = NVME_MC_EXTENDED_LBA;
|
2021-01-14 09:33:54 +08:00
|
|
|
id->dps = req->ns->pi_type;
|
2020-05-19 22:06:01 +08:00
|
|
|
id->flbas = NVME_NS_FLBAS_META_EXT;
|
2021-01-14 09:33:54 +08:00
|
|
|
id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
|
2020-05-19 22:06:01 +08:00
|
|
|
}
|
|
|
|
|
2021-01-14 09:33:54 +08:00
|
|
|
if (req->ns->readonly)
|
2018-08-08 14:01:07 +08:00
|
|
|
id->nsattr |= (1 << 0);
|
2018-06-01 00:23:48 +08:00
|
|
|
done:
|
2021-01-14 09:33:51 +08:00
|
|
|
if (!status)
|
|
|
|
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
kfree(id);
|
|
|
|
out:
|
|
|
|
nvmet_req_complete(req, status);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Identify Active Namespace ID list (CNS 02h): report NSIDs strictly
 * greater than the NSID given in the command, in ascending order.
 * The xarray is indexed by NSID, so iteration already yields them in
 * increasing order; the zero-filled remainder of the 4K buffer
 * terminates the list.
 */
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	/* kzalloc so unused tail entries read back as NSID 0 (end of list) */
	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* the spec asks for NSIDs *greater than* the one supplied */
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}
|
|
|
|
|
2017-06-07 17:45:32 +08:00
|
|
|
/*
 * Append one Namespace Identification Descriptor to the response:
 * first the fixed descriptor header (type/length), then @len bytes of
 * payload from @id.  *@off is advanced past everything written so the
 * caller can chain descriptors.  Returns an NVMe status code.
 */
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Identify Namespace Identification Descriptor list (CNS 03h).
 * Emits UUID and NGUID descriptors only when the stored value is
 * non-zero, always emits the CSI descriptor, then zero-fills the rest
 * of the 4K buffer (an all-zero descriptor header terminates the list).
 */
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	/* only report a UUID if one was configured (non-zero) */
	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	/* likewise for the NGUID */
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	/* the Command Set Identifier descriptor is always present */
	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	if (status)
		goto out;

	/* zero the remainder of the buffer to terminate the list */
	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
	nvmet_req_complete(req, status);
}
|
|
|
|
|
2021-06-10 09:32:51 +08:00
|
|
|
static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
switch (req->cmd->identify.csi) {
|
|
|
|
case NVME_CSI_NVM:
|
|
|
|
nvmet_execute_identify_desclist(req);
|
|
|
|
return true;
|
2021-06-10 09:32:52 +08:00
|
|
|
case NVME_CSI_ZNS:
|
|
|
|
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
|
|
|
|
nvmet_execute_identify_desclist(req);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
2021-06-10 09:32:51 +08:00
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-24 00:35:41 +08:00
|
|
|
static void nvmet_execute_identify(struct nvmet_req *req)
|
|
|
|
{
|
2020-05-19 22:05:59 +08:00
|
|
|
if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
|
2019-10-24 00:35:44 +08:00
|
|
|
return;
|
|
|
|
|
2019-10-24 00:35:41 +08:00
|
|
|
switch (req->cmd->identify.cns) {
|
|
|
|
case NVME_ID_CNS_NS:
|
2021-06-10 09:32:51 +08:00
|
|
|
switch (req->cmd->identify.csi) {
|
|
|
|
case NVME_CSI_NVM:
|
|
|
|
return nvmet_execute_identify_ns(req);
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
2021-06-10 09:32:52 +08:00
|
|
|
case NVME_ID_CNS_CS_NS:
|
|
|
|
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
|
|
|
|
switch (req->cmd->identify.csi) {
|
|
|
|
case NVME_CSI_ZNS:
|
|
|
|
return nvmet_execute_identify_cns_cs_ns(req);
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
2019-10-24 00:35:41 +08:00
|
|
|
case NVME_ID_CNS_CTRL:
|
2021-06-10 09:32:51 +08:00
|
|
|
switch (req->cmd->identify.csi) {
|
|
|
|
case NVME_CSI_NVM:
|
|
|
|
return nvmet_execute_identify_ctrl(req);
|
|
|
|
}
|
|
|
|
break;
|
2021-06-10 09:32:52 +08:00
|
|
|
case NVME_ID_CNS_CS_CTRL:
|
|
|
|
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
|
|
|
|
switch (req->cmd->identify.csi) {
|
|
|
|
case NVME_CSI_ZNS:
|
|
|
|
return nvmet_execute_identify_cns_cs_ctrl(req);
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
2019-10-24 00:35:41 +08:00
|
|
|
case NVME_ID_CNS_NS_ACTIVE_LIST:
|
2021-06-10 09:32:51 +08:00
|
|
|
switch (req->cmd->identify.csi) {
|
|
|
|
case NVME_CSI_NVM:
|
|
|
|
return nvmet_execute_identify_nslist(req);
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
2019-10-24 00:35:41 +08:00
|
|
|
case NVME_ID_CNS_NS_DESC_LIST:
|
2021-06-10 09:32:51 +08:00
|
|
|
if (nvmet_handle_identify_desclist(req) == true)
|
|
|
|
return;
|
|
|
|
break;
|
2019-10-24 00:35:41 +08:00
|
|
|
}
|
|
|
|
|
2021-06-10 09:32:49 +08:00
|
|
|
nvmet_req_cns_error_complete(req);
|
2019-10-24 00:35:41 +08:00
|
|
|
}
|
|
|
|
|
2016-06-22 00:04:20 +08:00
|
|
|
/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately telling the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	/* result bit 0 set = the command to abort was not found */
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}
|
|
|
|
|
2018-08-08 14:01:07 +08:00
|
|
|
/*
 * Flush the namespace backend (file or block device) so outstanding
 * writes are durable before the write-protect transition is reported.
 * Returns an NVMe status code (0 on success).
 */
static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	/* dispatch on the backend type the namespace was configured with */
	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}
|
|
|
|
|
|
|
|
/*
 * Set Features, Write Protect: change the namespace's write-protection
 * state according to cdw11.  subsys->lock serializes the state change
 * with the namespace-changed notification.
 */
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		/* the transition only sticks if the flush succeeds */
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		/*
		 * NOTE(review): unrecognized values leave status at 0
		 * (from nvmet_req_find_ns) and so complete successfully
		 * without changing state — confirm this is intended.
		 */
		break;
	}

	if (!status)
		/* notify hosts of the namespace attribute change */
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}
|
|
|
|
|
2018-11-13 05:56:36 +08:00
|
|
|
/*
 * Set Features, Keep Alive Timer: cdw11 carries the timeout in
 * milliseconds; it is stored in seconds, rounded up.  The keep-alive
 * timer is stopped and restarted so the new value takes effect
 * immediately.  The result reports the stored (seconds) value.
 */
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Set Features, Asynchronous Event Configuration.  @mask restricts
 * which AEN bits may be enabled; any set bit outside it is an invalid
 * field error pointing at cdw11.
 */
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	/* paired with READ_ONCE() in readers of aen_enabled */
	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}
|
|
|
|
|
2020-07-25 01:25:17 +08:00
|
|
|
/*
 * Set Features admin command: dispatch on the Feature Identifier in
 * the low byte of cdw10 and complete the request with the resulting
 * status.
 */
void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		/* 0xffff is not a permitted 0's-based queue count request */
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
		/* grant the same (0's based) count for SQs and CQs */
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		/* NOTE(review): setting the host ID here is rejected with
		 * a command sequence error — presumably it is fixed at
		 * connect time; confirm against the fabrics code. */
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}
|
|
|
|
|
2018-08-08 14:01:07 +08:00
|
|
|
static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
|
|
|
|
{
|
2021-02-10 13:48:01 +08:00
|
|
|
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
|
2018-08-08 14:01:07 +08:00
|
|
|
u32 result;
|
|
|
|
|
2021-02-10 13:47:54 +08:00
|
|
|
result = nvmet_req_find_ns(req);
|
|
|
|
if (result)
|
|
|
|
return result;
|
|
|
|
|
2018-08-08 14:01:07 +08:00
|
|
|
mutex_lock(&subsys->lock);
|
|
|
|
if (req->ns->readonly == true)
|
|
|
|
result = NVME_NS_WRITE_PROTECT;
|
|
|
|
else
|
|
|
|
result = NVME_NS_NO_WRITE_PROTECT;
|
|
|
|
nvmet_set_result(req, result);
|
|
|
|
mutex_unlock(&subsys->lock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-11-13 05:56:36 +08:00
|
|
|
void nvmet_get_feat_kato(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
nvmet_set_result(req, req->sq->ctrl->kato * 1000);
|
|
|
|
}
|
|
|
|
|
|
|
|
void nvmet_get_feat_async_event(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
|
|
|
|
}
|
|
|
|
|
2020-07-25 01:25:17 +08:00
|
|
|
/*
 * Get Features admin command: dispatch on the Feature Identifier in
 * the low byte of cdw10, set the result (and for Host ID, the data
 * buffer), and complete the request.
 */
void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		/* always report a volatile write cache as present */
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		/* 0's based SQ count in the low word, CQ count in the high */
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		/* host ID is returned in the data buffer, not the result */
		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}
|
|
|
|
|
2018-11-13 05:56:36 +08:00
|
|
|
/*
 * Asynchronous Event Request: queue the request until an event fires
 * rather than completing it.  At most NVMET_ASYNC_EVENTS commands may
 * be outstanding; excess requests fail with ASYNC_LIMIT.  ctrl->lock
 * protects the pending-command array.
 */
void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	/* kick the worker in case an event is already pending */
	queue_work(nvmet_wq, &ctrl->async_event_work);
}
|
|
|
|
|
2018-11-13 05:56:35 +08:00
|
|
|
/*
 * Keep Alive: re-arm the controller's keep-alive timer.  A controller
 * that negotiated KATO == 0 has no keep-alive timer, so the command is
 * rejected in that case.
 */
void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);
	/* push the existing timer out by another full KATO period */
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);
}
|
|
|
|
|
2017-02-28 13:21:33 +08:00
|
|
|
/*
 * Parse an admin command and install its ->execute handler.  The order
 * of the early checks matters: fabrics commands bypass authentication
 * and controller-state checks, discovery subsystems have their own
 * command set, and passthru controllers forward everything else to the
 * underlying device.  Returns 0 on success or an NVMe status code.
 */
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_admin_cmd(req);
	if (unlikely(!nvmet_check_auth_status(req)))
		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}
|