nvme updates for Linux 6.4
Merge tag 'nvme-6.4-2023-04-14' of git://git.infradead.org/nvme into for-6.4/block

Pull NVMe updates from Christoph:

"nvme updates for Linux 6.4

 - drop redundant pci_enable_pcie_error_reporting (Bjorn Helgaas)
 - validate nvmet module parameters (Chaitanya Kulkarni)
 - fence TCP socket on receive error (Chris Leech)
 - fix async event trace event (Keith Busch)
 - minor cleanups (Chaitanya Kulkarni, zhenwei pi)
 - fix and cleanup nvmet Identify handling (Damien Le Moal, Christoph Hellwig)
 - fix double blk_mq_complete_request race in the timeout handler (Lei Yin)
 - fix irq locking in nvme-fcloop (Ming Lei)
 - remove queue mapping helper for rdma devices (Sagi Grimberg)"

* tag 'nvme-6.4-2023-04-14' of git://git.infradead.org/nvme:
  nvme-fcloop: fix "inconsistent {IN-HARDIRQ-W} -> {HARDIRQ-ON-W} usage"
  blk-mq-rdma: remove queue mapping helper for rdma devices
  nvme-rdma: minor cleanup in nvme_rdma_create_cq()
  nvme: fix double blk_mq_complete_request for timeout request with low probability
  nvme: fix async event trace event
  nvme-apple: return directly instead of else
  nvme-apple: return directly instead of else
  nvmet-tcp: validate idle poll modparam value
  nvmet-tcp: validate so_priority modparam value
  nvme-tcp: fence TCP socket on receive error
  nvmet: remove nvmet_req_cns_error_complete
  nvmet: rename nvmet_execute_identify_cns_cs_ns
  nvmet: fix Identify Identification Descriptor List handling
  nvmet: cleanup nvmet_execute_identify()
  nvmet: fix I/O Command Set specific Identify Controller
  nvmet: fix Identify Active Namespace ID list handling
  nvmet: fix Identify Controller handling
  nvmet: fix Identify Namespace handling
  nvmet: fix error handling in nvmet_execute_identify_cns_cs_ns()
  nvme-pci: drop redundant pci_enable_pcie_error_reporting()
commit d2a1d45ced
@@ -215,11 +215,6 @@ config BLK_MQ_VIRTIO
 	depends on VIRTIO
 	default y
 
-config BLK_MQ_RDMA
-	bool
-	depends on INFINIBAND
-	default y
-
 config BLK_PM
 	def_bool PM
 
@@ -30,7 +30,6 @@ obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY_T10)	+= t10-pi.o
 obj-$(CONFIG_BLK_MQ_PCI)	+= blk-mq-pci.o
 obj-$(CONFIG_BLK_MQ_VIRTIO)	+= blk-mq-virtio.o
-obj-$(CONFIG_BLK_MQ_RDMA)	+= blk-mq-rdma.o
 obj-$(CONFIG_BLK_DEV_ZONED)	+= blk-zoned.o
 obj-$(CONFIG_BLK_WBT)	+= blk-wbt.o
 obj-$(CONFIG_BLK_DEBUG_FS)	+= blk-mq-debugfs.o
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2017 Sagi Grimberg.
- */
-#include <linux/blk-mq.h>
-#include <linux/blk-mq-rdma.h>
-#include <rdma/ib_verbs.h>
-
-/**
- * blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
- * @map:	CPU to hardware queue map.
- * @dev:	rdma device to provide a mapping for.
- * @first_vec:	first interrupt vectors to use for queues (usually 0)
- *
- * This function assumes the rdma device @dev has at least as many available
- * interrupt vetors as @set has queues.  It will then query it's affinity mask
- * and built queue mapping that maps a queue to the CPUs that have irq affinity
- * for the corresponding vector.
- *
- * In case either the driver passed a @dev with less vectors than
- * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
- * vector, we fallback to the naive mapping.
- */
-void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
-		struct ib_device *dev, int first_vec)
-{
-	const struct cpumask *mask;
-	unsigned int queue, cpu;
-
-	for (queue = 0; queue < map->nr_queues; queue++) {
-		mask = ib_get_vector_affinity(dev, first_vec + queue);
-		if (!mask)
-			goto fallback;
-
-		for_each_cpu(cpu, mask)
-			map->mq_map[cpu] = map->queue_offset + queue;
-	}
-
-	return;
-
-fallback:
-	blk_mq_map_queues(map);
-}
-EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
@@ -209,16 +209,16 @@ static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
 {
 	if (q->is_adminq)
 		return container_of(q, struct apple_nvme, adminq);
-	else
-		return container_of(q, struct apple_nvme, ioq);
+
+	return container_of(q, struct apple_nvme, ioq);
 }
 
 static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
 {
 	if (q->is_adminq)
 		return APPLE_NVME_AQ_DEPTH;
-	else
-		return APPLE_ANS_MAX_QUEUE_DEPTH;
+
+	return APPLE_ANS_MAX_QUEUE_DEPTH;
 }
 
 static void apple_nvme_rtkit_crashed(void *cookie)
@@ -450,8 +450,8 @@ bool nvme_cancel_request(struct request *req, void *data)
 	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
 				"Cancelling I/O %d", req->tag);
 
-	/* don't abort one completed request */
-	if (blk_mq_request_completed(req))
+	/* don't abort one completed or idle request */
+	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
 		return true;
 
 	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
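The hunk above is the double-completion fix: checking only blk_mq_request_completed() still let the cancel path complete a request that the timeout handler had already completed and put back to the idle state. Testing the blk-mq request state directly skips everything that is not currently in flight. A minimal sketch of that pattern outside the nvme code (not part of the patch; the callback name and the completion call are illustrative):

#include <linux/blk-mq.h>

/* Illustrative tagset-iterator callback: only requests the block layer
 * still tracks as MQ_RQ_IN_FLIGHT are completed; idle or already
 * completed requests are skipped so they cannot be completed twice.
 */
static bool example_cancel_request(struct request *rq, void *data)
{
	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return true;	/* keep iterating, leave this request alone */

	blk_mq_complete_request(rq);
	return true;
}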
@@ -4808,8 +4808,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 	u32 aer_notice_type = nvme_aer_subtype(result);
 	bool requeue = true;
 
-	trace_nvme_async_event(ctrl, aer_notice_type);
-
 	switch (aer_notice_type) {
 	case NVME_AER_NOTICE_NS_CHANGED:
 		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
@@ -4845,7 +4843,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 
 static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
 {
-	trace_nvme_async_event(ctrl, NVME_AER_ERROR);
 	dev_warn(ctrl->device, "resetting controller due to AER\n");
 	nvme_reset_ctrl(ctrl);
 }
@@ -4861,6 +4858,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
 		return;
 
+	trace_nvme_async_event(ctrl, result);
 	switch (aer_type) {
 	case NVME_AER_NOTICE:
 		requeue = nvme_handle_aen_notice(ctrl, result);
@@ -4878,7 +4876,6 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 	case NVME_AER_SMART:
 	case NVME_AER_CSS:
 	case NVME_AER_VS:
-		trace_nvme_async_event(ctrl, aer_type);
 		ctrl->aen_result = result;
 		break;
 	default:
@@ -5,7 +5,6 @@
  */
 
 #include <linux/acpi.h>
-#include <linux/aer.h>
 #include <linux/async.h>
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
@@ -2535,7 +2534,6 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 
 	nvme_map_cmb(dev);
 
-	pci_enable_pcie_error_reporting(pdev);
 	pci_save_state(pdev);
 
 	result = nvme_pci_configure_admin_queue(dev);
@@ -2600,10 +2598,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 	nvme_suspend_io_queues(dev);
 	nvme_suspend_queue(dev, 0);
 	pci_free_irq_vectors(pdev);
-	if (pci_is_enabled(pdev)) {
-		pci_disable_pcie_error_reporting(pdev);
+	if (pci_is_enabled(pdev))
 		pci_disable_device(pdev);
-	}
 	nvme_reap_pending_cqes(dev);
 
 	nvme_cancel_tagset(&dev->ctrl);
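For the nvme-pci hunks above, the removed pci_enable_pcie_error_reporting()/pci_disable_pcie_error_reporting() calls are redundant on the assumption, stated in the cleanup series these patches come from, that the PCI core already enables PCIe error reporting during enumeration when it owns AER. A rough sketch of what a device-enable path then needs, with no AER calls of its own (function name and exact call set are illustrative, not nvme-pci's):

#include <linux/pci.h>

/* Illustrative enable path: no pci_*_pcie_error_reporting() calls;
 * error reporting is assumed to be handled by the PCI/AER core.
 */
static int example_pci_enable(struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);
	pci_save_state(pdev);
	return 0;
}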
@@ -12,7 +12,6 @@
 #include <linux/string.h>
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
-#include <linux/blk-mq-rdma.h>
 #include <linux/blk-integrity.h>
 #include <linux/types.h>
 #include <linux/list.h>
@@ -464,7 +463,6 @@ static int nvme_rdma_create_cq(struct ib_device *ibdev,
 		struct nvme_rdma_queue *queue)
 {
 	int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
-	enum ib_poll_context poll_ctx;
 
 	/*
 	 * Spread I/O queues completion vectors according their queue index.
@@ -473,15 +471,12 @@ static int nvme_rdma_create_cq(struct ib_device *ibdev,
 	comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
 
 	/* Polling queues need direct cq polling context */
-	if (nvme_rdma_poll_queue(queue)) {
-		poll_ctx = IB_POLL_DIRECT;
+	if (nvme_rdma_poll_queue(queue))
 		queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
-				comp_vector, poll_ctx);
-	} else {
-		poll_ctx = IB_POLL_SOFTIRQ;
+					   comp_vector, IB_POLL_DIRECT);
+	else
 		queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
-				comp_vector, poll_ctx);
-	}
+					      comp_vector, IB_POLL_SOFTIRQ);
 
 	if (IS_ERR(queue->ib_cq)) {
 		ret = PTR_ERR(queue->ib_cq);
@@ -2163,10 +2158,8 @@ static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
 		set->map[HCTX_TYPE_READ].queue_offset = 0;
 	}
-	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
-			ctrl->device->dev, 0);
-	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
-			ctrl->device->dev, 0);
+	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
 
 	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
 		/* map dedicated poll queues only if we have queues left */
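With blk_mq_rdma_map_queues() gone, nvme-rdma's map_queues callback above simply asks the block layer for its generic CPU-to-hardware-queue spread, which is also what the deleted helper fell back to whenever ib_get_vector_affinity() returned no mask. The generic pattern, reduced to a sketch (the function name is illustrative):

#include <linux/blk-mq.h>

/* Illustrative map_queues callback: let the block layer spread CPUs
 * over each queue map instead of deriving the layout from device
 * interrupt affinity.
 */
static void example_map_queues(struct blk_mq_tag_set *set)
{
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
}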
@@ -876,6 +876,9 @@ static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
 	size_t consumed = len;
 	int result;
 
+	if (unlikely(!queue->rd_enabled))
+		return -EFAULT;
+
 	while (len) {
 		switch (nvme_tcp_recv_state(queue)) {
 		case NVME_TCP_RECV_PDU:
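The nvme-tcp hunk above fences the receive path: once the error handling has marked the queue unreadable (rd_enabled cleared), the socket data callback refuses to parse any further bytes. Stripped of the transport details, the pattern is just an early-out on a flag owned by the error path (types and names below are illustrative, apart from rd_enabled which follows the patch):

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_queue {
	bool rd_enabled;	/* cleared by the error handler */
};

/* Illustrative receive hook: stop consuming data on a fenced queue so
 * that nothing after the first fatal protocol error gets parsed.
 */
static int example_recv(struct example_queue *queue, const void *buf, size_t len)
{
	if (unlikely(!queue->rd_enabled))
		return -EFAULT;	/* tell the caller to stop feeding data */

	/* ... normal PDU parsing would go here ... */
	return len;
}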
@@ -127,15 +127,12 @@ TRACE_EVENT(nvme_async_event,
 	),
 	TP_printk("nvme%d: NVME_AEN=%#08x [%s]",
 		__entry->ctrl_id, __entry->result,
-		__print_symbolic(__entry->result,
-		aer_name(NVME_AER_NOTICE_NS_CHANGED),
-		aer_name(NVME_AER_NOTICE_ANA),
-		aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
-		aer_name(NVME_AER_NOTICE_DISC_CHANGED),
-		aer_name(NVME_AER_ERROR),
-		aer_name(NVME_AER_SMART),
-		aer_name(NVME_AER_CSS),
-		aer_name(NVME_AER_VS))
+		__print_symbolic(__entry->result & 0x7,
+		aer_name(NVME_AER_ERROR),
+		aer_name(NVME_AER_SMART),
+		aer_name(NVME_AER_NOTICE),
+		aer_name(NVME_AER_CSS),
+		aer_name(NVME_AER_VS))
 	)
 );
 
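The trace changes above hinge on how an AEN completion result is laid out: the asynchronous event type lives in the low three bits and the notice subtype in bits 15:8, so printing the raw result against notice-subtype names (as the old TP_printk table did) could mislabel events. A small sketch of the decoding the new "& 0x7" mask relies on (helper names are illustrative; the core driver has equivalent helpers):

#include <linux/types.h>

/* AEN completion dword 0: bits 2:0 = event type, bits 15:8 = event
 * information (notice subtype), bits 23:16 = log page identifier.
 */
static inline u32 example_aer_type(u32 result)
{
	return result & 0x7;
}

static inline u32 example_aer_subtype(u32 result)
{
	return (result & 0xff00) >> 8;
}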
@@ -668,21 +668,11 @@ out:
 	nvmet_req_complete(req, status);
 }
 
-static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
+static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
 {
-	switch (req->cmd->identify.csi) {
-	case NVME_CSI_NVM:
-		nvmet_execute_identify_desclist(req);
-		return true;
-	case NVME_CSI_ZNS:
-		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
-			nvmet_execute_identify_desclist(req);
-			return true;
-		}
-		return false;
-	default:
-		return false;
-	}
+	/* Not supported: return zeroes */
+	nvmet_req_complete(req,
+		   nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
 }
 
 static void nvmet_execute_identify(struct nvmet_req *req)
@@ -692,54 +682,49 @@ static void nvmet_execute_identify(struct nvmet_req *req)
 
 	switch (req->cmd->identify.cns) {
 	case NVME_ID_CNS_NS:
-		switch (req->cmd->identify.csi) {
-		case NVME_CSI_NVM:
-			return nvmet_execute_identify_ns(req);
-		default:
-			break;
-		}
-		break;
-	case NVME_ID_CNS_CS_NS:
-		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
-			switch (req->cmd->identify.csi) {
-			case NVME_CSI_ZNS:
-				return nvmet_execute_identify_cns_cs_ns(req);
-			default:
-				break;
-			}
-		}
-		break;
+		nvmet_execute_identify_ns(req);
+		return;
 	case NVME_ID_CNS_CTRL:
+		nvmet_execute_identify_ctrl(req);
+		return;
+	case NVME_ID_CNS_NS_ACTIVE_LIST:
+		nvmet_execute_identify_nslist(req);
+		return;
+	case NVME_ID_CNS_NS_DESC_LIST:
+		nvmet_execute_identify_desclist(req);
+		return;
+	case NVME_ID_CNS_CS_NS:
 		switch (req->cmd->identify.csi) {
 		case NVME_CSI_NVM:
-			return nvmet_execute_identify_ctrl(req);
+			/* Not supported */
+			break;
+		case NVME_CSI_ZNS:
+			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+				nvmet_execute_identify_ns_zns(req);
+				return;
+			}
+			break;
 		}
 		break;
 	case NVME_ID_CNS_CS_CTRL:
-		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
-			switch (req->cmd->identify.csi) {
-			case NVME_CSI_ZNS:
-				return nvmet_execute_identify_cns_cs_ctrl(req);
-			default:
-				break;
-			}
-		}
-		break;
-	case NVME_ID_CNS_NS_ACTIVE_LIST:
 		switch (req->cmd->identify.csi) {
 		case NVME_CSI_NVM:
-			return nvmet_execute_identify_nslist(req);
-		default:
+			nvmet_execute_identify_ctrl_nvm(req);
+			return;
+		case NVME_CSI_ZNS:
+			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+				nvmet_execute_identify_ctrl_zns(req);
+				return;
+			}
 			break;
 		}
 		break;
-	case NVME_ID_CNS_NS_DESC_LIST:
-		if (nvmet_handle_identify_desclist(req) == true)
-			return;
-		break;
 	}
 
-	nvmet_req_cns_error_complete(req);
+	pr_debug("unhandled identify cns %d on qid %d\n",
+		 req->cmd->identify.cns, req->sq->qid);
+	req->error_loc = offsetof(struct nvme_identify, cns);
+	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
 }
 
 /*
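One detail of the Identify rework above: for a mandatory but unimplemented I/O Command Set specific data structure (here the NVM command set Identify Controller page), the target now answers with an all-zero buffer of the expected size rather than an error, which is what nvmet_execute_identify_ctrl_nvm() does via nvmet_zero_sgl(). In isolation the pattern looks like this (the wrapper name is hypothetical; nvmet_zero_sgl() and nvmet_req_complete() are the existing nvmet helpers used in the hunk and come from the target's private nvmet.h):

/* Illustrative only: complete an Identify command with an all-zero
 * payload of the expected size, i.e. "nothing to report" without error.
 */
static void example_identify_zeroes(struct nvmet_req *req, size_t len)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, len));
}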
@@ -614,10 +614,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
 	struct fcloop_fcpreq *tfcp_req =
 		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
 	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+	unsigned long flags;
 	int ret = 0;
 	bool aborted = false;
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	switch (tfcp_req->inistate) {
 	case INI_IO_START:
 		tfcp_req->inistate = INI_IO_ACTIVE;
@@ -626,11 +627,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
 		aborted = true;
 		break;
 	default:
-		spin_unlock_irq(&tfcp_req->reqlock);
+		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 		WARN_ON(1);
 		return;
 	}
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	if (unlikely(aborted))
 		ret = -ECANCELED;
@@ -655,8 +656,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
 		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
 	struct nvmefc_fcp_req *fcpreq;
 	bool completed = false;
+	unsigned long flags;
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	fcpreq = tfcp_req->fcpreq;
 	switch (tfcp_req->inistate) {
 	case INI_IO_ABORTED:
@@ -665,11 +667,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
 		completed = true;
 		break;
 	default:
-		spin_unlock_irq(&tfcp_req->reqlock);
+		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 		WARN_ON(1);
 		return;
 	}
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	if (unlikely(completed)) {
 		/* remove reference taken in original abort downcall */
@@ -681,9 +683,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
 	nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
 				&tfcp_req->tgt_fcp_req);
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	tfcp_req->fcpreq = NULL;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
 	/* call_host_done releases reference for abort downcall */
@@ -699,11 +701,12 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
 	struct fcloop_fcpreq *tfcp_req =
 		container_of(work, struct fcloop_fcpreq, tio_done_work);
 	struct nvmefc_fcp_req *fcpreq;
+	unsigned long flags;
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	fcpreq = tfcp_req->fcpreq;
 	tfcp_req->inistate = INI_IO_COMPLETED;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
 }
@@ -807,13 +810,14 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
 	u32 rsplen = 0, xfrlen = 0;
 	int fcp_err = 0, active, aborted;
 	u8 op = tgt_fcpreq->op;
+	unsigned long flags;
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	fcpreq = tfcp_req->fcpreq;
 	active = tfcp_req->active;
 	aborted = tfcp_req->aborted;
 	tfcp_req->active = true;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	if (unlikely(active))
 		/* illegal - call while i/o active */
@@ -821,9 +825,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
 
 	if (unlikely(aborted)) {
 		/* target transport has aborted i/o prior */
-		spin_lock_irq(&tfcp_req->reqlock);
+		spin_lock_irqsave(&tfcp_req->reqlock, flags);
 		tfcp_req->active = false;
-		spin_unlock_irq(&tfcp_req->reqlock);
+		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 		tgt_fcpreq->transferred_length = 0;
 		tgt_fcpreq->fcp_error = -ECANCELED;
 		tgt_fcpreq->done(tgt_fcpreq);
@@ -880,9 +884,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
 		break;
 	}
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	tfcp_req->active = false;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	tgt_fcpreq->transferred_length = xfrlen;
 	tgt_fcpreq->fcp_error = fcp_err;
@@ -896,15 +900,16 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
 			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
 {
 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+	unsigned long flags;
 
 	/*
 	 * mark aborted only in case there were 2 threads in transport
 	 * (one doing io, other doing abort) and only kills ops posted
 	 * after the abort request
 	 */
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	tfcp_req->aborted = true;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	tfcp_req->status = NVME_SC_INTERNAL;
 
@@ -946,6 +951,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
 	struct fcloop_fcpreq *tfcp_req;
 	bool abortio = true;
+	unsigned long flags;
 
 	spin_lock(&inireq->inilock);
 	tfcp_req = inireq->tfcp_req;
@@ -958,7 +964,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 		return;
 
 	/* break initiator/target relationship for io */
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	switch (tfcp_req->inistate) {
 	case INI_IO_START:
 	case INI_IO_ACTIVE:
@@ -968,11 +974,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 		abortio = false;
 		break;
 	default:
-		spin_unlock_irq(&tfcp_req->reqlock);
+		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 		WARN_ON(1);
 		return;
 	}
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	if (abortio)
 		/* leave the reference while the work item is scheduled */
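All of the fcloop hunks above are one and the same conversion: reqlock can be taken in contexts where interrupts may already be disabled (the lockdep report in the commit subject is about exactly that), so the plain _irq lock/unlock pair, which unconditionally re-enables interrupts, is replaced by the _irqsave/_irqrestore pair, which restores whatever state the caller had. A generic sketch of the conversion (the struct and function are illustrative, not fcloop's):

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_req {
	spinlock_t	reqlock;
	bool		aborted;
};

/* Safe from any context: save the current interrupt state, take the
 * lock, and restore exactly that state afterwards instead of forcing
 * interrupts back on the way spin_unlock_irq() does.
 */
static void example_mark_aborted(struct example_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&req->reqlock, flags);
	req->aborted = true;
	spin_unlock_irqrestore(&req->reqlock, flags);
}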
@@ -581,8 +581,8 @@ bool nvmet_ns_revalidate(struct nvmet_ns *ns);
 u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
 
 bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
-void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
-void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
+void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
+void nvmet_execute_identify_ns_zns(struct nvmet_req *req);
 void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
 void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
 void nvmet_bdev_execute_zone_append(struct nvmet_req *req);
@@ -687,14 +687,6 @@ static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
 	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
 }
 
-static inline void nvmet_req_cns_error_complete(struct nvmet_req *req)
-{
-	pr_debug("unhandled identify cns %d on qid %d\n",
-		 req->cmd->identify.cns, req->sq->qid);
-	req->error_loc = offsetof(struct nvme_identify, cns);
-	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
-}
-
 static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
 {
 	if (bio != &req->b.inline_bio)
@@ -20,6 +20,31 @@
 
 #define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)
 
+static int param_store_val(const char *str, int *val, int min, int max)
+{
+	int ret, new_val;
+
+	ret = kstrtoint(str, 10, &new_val);
+	if (ret)
+		return -EINVAL;
+
+	if (new_val < min || new_val > max)
+		return -EINVAL;
+
+	*val = new_val;
+	return 0;
+}
+
+static int set_params(const char *str, const struct kernel_param *kp)
+{
+	return param_store_val(str, kp->arg, 0, INT_MAX);
+}
+
+static const struct kernel_param_ops set_param_ops = {
+	.set	= set_params,
+	.get	= param_get_int,
+};
+
 /* Define the socket priority to use for connections were it is desirable
  * that the NIC consider performing optimized packet processing or filtering.
  * A non-zero value being sufficient to indicate general consideration of any
@@ -27,8 +52,8 @@
  * values that may be unique for some NIC implementations.
  */
 static int so_priority;
-module_param(so_priority, int, 0644);
-MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");
+device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
+MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");
 
 /* Define a time period (in usecs) that io_work() shall sample an activated
  * queue before determining it to be idle.  This optional module behavior
@@ -36,9 +61,10 @@ MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");
  * using advanced interrupt moderation techniques.
  */
 static int idle_poll_period_usecs;
-module_param(idle_poll_period_usecs, int, 0644);
+device_param_cb(idle_poll_period_usecs, &set_param_ops,
+		&idle_poll_period_usecs, 0644);
 MODULE_PARM_DESC(idle_poll_period_usecs,
-		"nvmet tcp io_work poll till idle time period in usecs");
+		"nvmet tcp io_work poll till idle time period in usecs: Default 0");
 
 #define NVMET_TCP_RECV_BUDGET		8
 #define NVMET_TCP_SEND_BUDGET		8
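The nvmet-tcp hunks above replace module_param(), which stores any int, with device_param_cb() plus a setter that rejects values outside [0, INT_MAX] at write time. The same technique in a self-contained form, using module_param_cb() with a made-up parameter and bounds:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_depth = 8;

/* Range-checked setter: reject the write instead of storing a bogus
 * value that the rest of the module would then have to cope with.
 */
static int example_depth_set(const char *val, const struct kernel_param *kp)
{
	int new_val;

	if (kstrtoint(val, 10, &new_val))
		return -EINVAL;
	if (new_val < 1 || new_val > 128)	/* hypothetical valid range */
		return -EINVAL;

	*(int *)kp->arg = new_val;
	return 0;
}

static const struct kernel_param_ops example_depth_ops = {
	.set	= example_depth_set,
	.get	= param_get_int,
};
module_param_cb(example_depth, &example_depth_ops, &example_depth, 0644);
MODULE_PARM_DESC(example_depth, "example bounded parameter: Default 8");

With a setter like this in place, an out-of-range write (for instance echoing -1 into the parameter's sysfs file) fails with -EINVAL instead of being silently accepted.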
@@ -70,7 +70,7 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
 	return true;
 }
 
-void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
+void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req)
 {
 	u8 zasl = req->sq->ctrl->subsys->zasl;
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -95,9 +95,9 @@ out:
 	nvmet_req_complete(req, status);
 }
 
-void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
+void nvmet_execute_identify_ns_zns(struct nvmet_req *req)
 {
-	struct nvme_id_ns_zns *id_zns;
+	struct nvme_id_ns_zns *id_zns = NULL;
 	u64 zsze;
 	u16 status;
 	u32 mar, mor;
@@ -118,16 +118,18 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
 	if (status)
 		goto done;
 
-	if (!bdev_is_zoned(req->ns->bdev)) {
-		req->error_loc = offsetof(struct nvme_identify, nsid);
-		goto done;
-	}
-
 	if (nvmet_ns_revalidate(req->ns)) {
 		mutex_lock(&req->ns->subsys->lock);
 		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
 		mutex_unlock(&req->ns->subsys->lock);
 	}
+
+	if (!bdev_is_zoned(req->ns->bdev)) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		req->error_loc = offsetof(struct nvme_identify, nsid);
+		goto out;
+	}
+
 	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
 					req->ns->blksize_shift;
 	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
@@ -148,8 +150,8 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
 
 done:
 	status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
-	kfree(id_zns);
 out:
+	kfree(id_zns);
 	nvmet_req_complete(req, status);
 }
 
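The last zns.c hunks show why id_zns is now initialized to NULL and freed under the common out: label: the new failure path taken after allocation (the !bdev_is_zoned() check) jumps to out, and because kfree(NULL) is a no-op the earlier failure paths can share the same label without leaking or double-freeing. The general shape of that error-handling pattern, as a standalone sketch:

#include <linux/errno.h>
#include <linux/slab.h>

/* Illustrative single-exit error handling: the "out" label owns the
 * kfree(), and early failures may jump straight to it because
 * kfree(NULL) does nothing.
 */
static int example_alloc_and_use(size_t len, bool usable)
{
	void *buf = NULL;
	int ret = 0;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	if (!usable) {
		ret = -EINVAL;
		goto out;
	}

	/* ... use buf ... */
out:
	kfree(buf);
	return ret;
}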
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BLK_MQ_RDMA_H
-#define _LINUX_BLK_MQ_RDMA_H
-
-struct blk_mq_tag_set;
-struct ib_device;
-
-void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
-		struct ib_device *dev, int first_vec);
-
-#endif /* _LINUX_BLK_MQ_RDMA_H */