Merge branch 'nvmf-4.10' of git://git.infradead.org/nvme-fabrics into for-4.10/block

Sagi writes:

The major addition here is the nvme FC transport implementation
from James.

What else:
- some cleanups and memory leak fixes in the host side fabrics code from Bart
- possible rcu violation fix from Sasha
- logging change from Max
- small include cleanup
Jens Axboe 2016-12-06 08:06:19 -07:00
commit d65cfe9094
24 changed files with 7313 additions and 34 deletions

MAINTAINERS

@@ -8659,6 +8659,16 @@ L: linux-nvme@lists.infradead.org
S: Supported
F: drivers/nvme/target/
NVM EXPRESS FC TRANSPORT DRIVERS
M: James Smart <james.smart@broadcom.com>
L: linux-nvme@lists.infradead.org
S: Supported
F: include/linux/nvme-fc.h
F: include/linux/nvme-fc-driver.h
F: drivers/nvme/host/fc.c
F: drivers/nvme/target/fc.c
F: drivers/nvme/target/fcloop.c
NVMEM FRAMEWORK
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M: Maxime Ripard <maxime.ripard@free-electrons.com>

drivers/nvme/host/Kconfig

@@ -43,3 +43,20 @@ config NVME_RDMA
from https://github.com/linux-nvme/nvme-cli.
If unsure, say N.
config NVME_FC
tristate "NVM Express over Fabrics FC host driver"
depends on BLOCK
depends on HAS_DMA
select NVME_CORE
select NVME_FABRICS
select SG_POOL
help
This provides support for the NVMe over Fabrics protocol using
the FC transport. This allows you to use remote block devices
exported using the NVMe protocol set.
To configure a NVMe over Fabrics controller use the nvme-cli tool
from https://github.com/linux-nvme/nvme-cli.
If unsure, say N.

drivers/nvme/host/Makefile

@@ -2,6 +2,7 @@ obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
nvme-core-y := core.o
nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o
@@ -12,3 +13,5 @@ nvme-y += pci.o
nvme-fabrics-y += fabrics.o
nvme-rdma-y += rdma.o
nvme-fc-y += fc.o

drivers/nvme/host/core.c

@@ -303,7 +303,6 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
memset(cmnd, 0, sizeof(*cmnd));
cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
cmnd->rw.command_id = req->tag;
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
@@ -345,6 +344,8 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
else
nvme_setup_rw(ns, req, cmd);
cmd->common.command_id = req->tag;
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

drivers/nvme/host/fabrics.c

@@ -576,7 +576,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
nqnlen = strlen(opts->subsysnqn);
if (nqnlen >= NVMF_NQN_SIZE) {
pr_err("%s needs to be < %d bytes\n",
-			opts->subsysnqn, NVMF_NQN_SIZE);
+			opts->subsysnqn, NVMF_NQN_SIZE);
ret = -EINVAL;
goto out;
}
@@ -666,10 +666,12 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
if (nqnlen >= NVMF_NQN_SIZE) {
pr_err("%s needs to be < %d bytes\n",
p, NVMF_NQN_SIZE);
kfree(p);
ret = -EINVAL;
goto out;
}
opts->host = nvmf_host_add(p);
kfree(p);
if (!opts->host) {
ret = -ENOMEM;
goto out;
@@ -825,8 +827,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
out_unlock:
mutex_unlock(&nvmf_transports_mutex);
out_free_opts:
-	nvmf_host_put(opts->host);
-	kfree(opts);
+	nvmf_free_options(opts);
return ERR_PTR(ret);
}

drivers/nvme/host/fc.c (new file, 2586 lines)

File diff suppressed because it is too large.

drivers/nvme/host/pci.c

@@ -611,7 +611,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out;
cmnd.common.command_id = req->tag;
blk_mq_start_request(req);
spin_lock_irq(&nvmeq->q_lock);

drivers/nvme/host/rdma.c

@@ -28,7 +28,6 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <linux/nvme-rdma.h>
#include "nvme.h"
@@ -241,7 +240,9 @@ out_free_ring:
static void nvme_rdma_qp_event(struct ib_event *event, void *context)
{
pr_debug("QP event %d\n", event->event);
pr_debug("QP event %s (%d)\n",
ib_event_msg(event->event), event->event);
}
static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
@@ -1398,7 +1399,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret != BLK_MQ_RQ_QUEUE_OK)
return ret;
c->common.command_id = rq->tag;
blk_mq_start_request(rq);
map_len = nvme_map_len(rq);
@@ -1904,6 +1904,14 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
opts->queue_size = ctrl->ctrl.maxcmd;
}
if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
/* warn if sqsize is lower than queue_size */
dev_warn(ctrl->ctrl.device,
"queue_size %zu > ctrl sqsize %u, clamping down\n",
opts->queue_size, ctrl->ctrl.sqsize + 1);
opts->queue_size = ctrl->ctrl.sqsize + 1;
}
if (opts->nr_io_queues) {
ret = nvme_rdma_create_io_queues(ctrl);
if (ret)

drivers/nvme/host/scsi.c

@@ -1280,10 +1280,6 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
u16 idx, u16 bd_len, u8 llbaa)
{
u16 bd_num;
bd_num = bd_len / ((llbaa == 0) ?
SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
/* Store block descriptor info if a FORMAT UNIT comes later */
/* TODO Saving 1st BD info; what to do if multiple BD received? */
if (llbaa == 0) {
@@ -1528,7 +1524,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int nvme_sc;
struct nvme_id_ns *id_ns;
u8 i;
-	u8 flbas, nlbaf;
+	u8 nlbaf;
u8 selected_lbaf = 0xFF;
u32 cdw10 = 0;
struct nvme_command c;
@@ -1539,7 +1535,6 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
if (res)
return res;
flbas = (id_ns->flbas) & 0x0F;
nlbaf = id_ns->nlbaf;
for (i = 0; i < nlbaf; i++) {
@@ -2168,12 +2163,10 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *cmd)
{
-	u8 immed, pcmod, no_flush, start;
+	u8 immed, no_flush;
	immed = cmd[1] & 0x01;
-	pcmod = cmd[3] & 0x0f;
	no_flush = cmd[4] & 0x04;
-	start = cmd[4] & 0x01;
if (immed != 0) {
return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,

drivers/nvme/target/Kconfig

@@ -34,3 +34,27 @@ config NVME_TARGET_RDMA
devices over RDMA.
If unsure, say N.
config NVME_TARGET_FC
tristate "NVMe over Fabrics FC target driver"
depends on NVME_TARGET
depends on HAS_DMA
help
This enables the NVMe FC target support, which allows exporting NVMe
devices over FC.
If unsure, say N.
config NVME_TARGET_FCLOOP
tristate "NVMe over Fabrics FC Transport Loopback Test driver"
depends on NVME_TARGET
select NVME_CORE
select NVME_FABRICS
select SG_POOL
depends on NVME_FC
depends on NVME_TARGET_FC
help
This enables the NVMe FC loopback test support, which can be useful
to test NVMe-FC transport interfaces.
If unsure, say N.

drivers/nvme/target/Makefile

@@ -2,8 +2,12 @@
obj-$(CONFIG_NVME_TARGET) += nvmet.o
obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o
obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o
obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
nvmet-y += core.o configfs.o admin-cmd.o io-cmd.o fabrics-cmd.o \
discovery.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
nvme-fcloop-y += fcloop.o

drivers/nvme/target/configfs.c

@@ -37,6 +37,8 @@ static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
return sprintf(page, "ipv6\n");
case NVMF_ADDR_FAMILY_IB:
return sprintf(page, "ib\n");
case NVMF_ADDR_FAMILY_FC:
return sprintf(page, "fc\n");
default:
return sprintf(page, "\n");
}
@@ -59,6 +61,8 @@ static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
} else if (sysfs_streq(page, "ib")) {
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
} else if (sysfs_streq(page, "fc")) {
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
} else {
pr_err("Invalid value '%s' for adrfam\n", page);
return -EINVAL;
@@ -209,6 +213,8 @@ static ssize_t nvmet_addr_trtype_show(struct config_item *item,
return sprintf(page, "rdma\n");
case NVMF_TRTYPE_LOOP:
return sprintf(page, "loop\n");
case NVMF_TRTYPE_FC:
return sprintf(page, "fc\n");
default:
return sprintf(page, "\n");
}
@@ -229,6 +235,12 @@ static void nvmet_port_init_tsas_loop(struct nvmet_port *port)
memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
}
static void nvmet_port_init_tsas_fc(struct nvmet_port *port)
{
port->disc_addr.trtype = NVMF_TRTYPE_FC;
memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
}
static ssize_t nvmet_addr_trtype_store(struct config_item *item,
const char *page, size_t count)
{
@@ -244,6 +256,8 @@ static ssize_t nvmet_addr_trtype_store(struct config_item *item,
nvmet_port_init_tsas_rdma(port);
} else if (sysfs_streq(page, "loop")) {
nvmet_port_init_tsas_loop(port);
} else if (sysfs_streq(page, "fc")) {
nvmet_port_init_tsas_fc(port);
} else {
pr_err("Invalid value '%s' for trtype\n", page);
return -EINVAL;
@@ -271,7 +285,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
mutex_lock(&subsys->lock);
ret = -EBUSY;
-	if (nvmet_ns_enabled(ns))
+	if (ns->enabled)
goto out_unlock;
kfree(ns->device_path);
@@ -307,7 +321,7 @@ static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
int ret = 0;
mutex_lock(&subsys->lock);
-	if (nvmet_ns_enabled(ns)) {
+	if (ns->enabled) {
ret = -EBUSY;
goto out_unlock;
}
@@ -339,7 +353,7 @@ CONFIGFS_ATTR(nvmet_ns_, device_nguid);
static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
-	return sprintf(page, "%d\n", nvmet_ns_enabled(to_nvmet_ns(item)));
+	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
}
static ssize_t nvmet_ns_enable_store(struct config_item *item,

drivers/nvme/target/core.c

@@ -264,7 +264,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
int ret = 0;
mutex_lock(&subsys->lock);
-	if (!list_empty(&ns->dev_link))
+	if (ns->enabled)
goto out_unlock;
ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
@@ -309,6 +309,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
ns->enabled = true;
ret = 0;
out_unlock:
mutex_unlock(&subsys->lock);
@@ -325,11 +326,11 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
struct nvmet_ctrl *ctrl;
mutex_lock(&subsys->lock);
-	if (list_empty(&ns->dev_link)) {
-		mutex_unlock(&subsys->lock);
-		return;
-	}
-	list_del_init(&ns->dev_link);
+	if (!ns->enabled)
+		goto out_unlock;
+
+	ns->enabled = false;
+	list_del_rcu(&ns->dev_link);
mutex_unlock(&subsys->lock);
/*
@@ -351,6 +352,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
if (ns->bdev)
blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
out_unlock:
mutex_unlock(&subsys->lock);
}

drivers/nvme/target/fc.c (new file, 2288 lines)

File diff suppressed because it is too large.

drivers/nvme/target/fcloop.c (new file, 1148 lines)

File diff suppressed because it is too large.

drivers/nvme/target/loop.c

@@ -194,7 +194,6 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
}
iod->cmd.common.command_id = req->tag;
blk_mq_start_request(req);
schedule_work(&iod->work);

drivers/nvme/target/nvmet.h

@@ -47,6 +47,7 @@ struct nvmet_ns {
loff_t size;
u8 nguid[16];
bool enabled;
struct nvmet_subsys *subsys;
const char *device_path;
@@ -61,11 +62,6 @@ static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
return container_of(to_config_group(item), struct nvmet_ns, group);
}
-static inline bool nvmet_ns_enabled(struct nvmet_ns *ns)
-{
-	return !list_empty_careful(&ns->dev_link);
-}
struct nvmet_cq {
u16 qid;
u16 size;

drivers/nvme/target/rdma.c

@@ -1044,8 +1044,10 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
}
ret = nvmet_sq_init(&queue->nvme_sq);
-	if (ret)
+	if (ret) {
+		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;
+	}
ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
if (ret)
@@ -1114,6 +1116,7 @@ out_destroy_sq:
out_free_queue:
kfree(queue);
out_reject:
pr_debug("rejecting connect request with status code %d\n", ret);
nvmet_rdma_cm_reject(cm_id, ret);
return NULL;
}
@@ -1127,7 +1130,8 @@ static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
rdma_notify(queue->cm_id, event->event);
break;
default:
pr_err("received unrecognized IB QP event %d\n", event->event);
pr_err("received IB QP event: %s (%d)\n",
ib_event_msg(event->event), event->event);
break;
}
}

include/linux/nvme-fc-driver.h (new file, 851 lines)

@@ -0,0 +1,851 @@
/*
* Copyright (c) 2016, Avago Technologies
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef _NVME_FC_DRIVER_H
#define _NVME_FC_DRIVER_H 1
/*
* ********************** LLDD FC-NVME Host API ********************
*
* For FC LLDDs that implement the NVME Host role.
*
* ******************************************************************
*/
/* FC Port role bitmask - can merge with FC Port Roles in fc transport */
#define FC_PORT_ROLE_NVME_INITIATOR 0x10
#define FC_PORT_ROLE_NVME_TARGET 0x11
#define FC_PORT_ROLE_NVME_DISCOVERY 0x12
/**
* struct nvme_fc_port_info - port-specific ids and FC connection-specific
* data element used during NVME Host role
* registrations
*
* Static fields describing the port being registered:
* @node_name: FC WWNN for the port
* @port_name: FC WWPN for the port
* @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx)
*
* Initialization values for dynamic port fields:
* @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
* be set to 0.
*/
struct nvme_fc_port_info {
u64 node_name;
u64 port_name;
u32 port_role;
u32 port_id;
};
/**
* struct nvmefc_ls_req - Request structure passed from NVME-FC transport
* to LLDD in order to perform a NVME FC-4 LS
* request and obtain a response.
*
* Values set by the NVME-FC layer prior to calling the LLDD ls_req
* entrypoint.
* @rqstaddr: pointer to request buffer
* @rqstdma: PCI DMA address of request buffer
* @rqstlen: Length, in bytes, of request buffer
* @rspaddr: pointer to response buffer
* @rspdma: PCI DMA address of response buffer
* @rsplen: Length, in bytes, of response buffer
* @timeout: Maximum amount of time, in seconds, to wait for the LS response.
* If timeout exceeded, LLDD to abort LS exchange and complete
* LS request with error status.
* @private: pointer to memory allocated alongside the ls request structure
* that is specifically for the LLDD to use while processing the
* request. The length of the buffer corresponds to the
* lsrqst_priv_sz value specified in the nvme_fc_port_template
* supplied by the LLDD.
* @done: The callback routine the LLDD is to invoke upon completion of
* the LS request. req argument is the pointer to the original LS
* request structure. Status argument must be 0 upon success, a
* negative errno on failure (example: -ENXIO).
*/
struct nvmefc_ls_req {
void *rqstaddr;
dma_addr_t rqstdma;
u32 rqstlen;
void *rspaddr;
dma_addr_t rspdma;
u32 rsplen;
u32 timeout;
void *private;
void (*done)(struct nvmefc_ls_req *req, int status);
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
enum nvmefc_fcp_datadir {
NVMEFC_FCP_NODATA, /* payload_length and sg_cnt will be zero */
NVMEFC_FCP_WRITE,
NVMEFC_FCP_READ,
};
#define NVME_FC_MAX_SEGMENTS 256
/**
* struct nvmefc_fcp_req - Request structure passed from NVME-FC transport
* to LLDD in order to perform a NVME FCP IO operation.
*
* Values set by the NVME-FC layer prior to calling the LLDD fcp_io
* entrypoint.
* @cmdaddr: pointer to the FCP CMD IU buffer
* @rspaddr: pointer to the FCP RSP IU buffer
* @cmddma: PCI DMA address of the FCP CMD IU buffer
* @rspdma: PCI DMA address of the FCP RSP IU buffer
* @cmdlen: Length, in bytes, of the FCP CMD IU buffer
* @rsplen: Length, in bytes, of the FCP RSP IU buffer
* @payload_length: Length of DATA_IN or DATA_OUT payload data to transfer
* @sg_table: scatter/gather structure for payload data
* @first_sgl: memory for 1st scatter/gather list segment for payload data
* @sg_cnt: number of elements in the scatter/gather list
* @io_dir: direction of the FCP request (see NVMEFC_FCP_xxx)
* @sqid: The nvme SQID the command is being issued on
* @done: The callback routine the LLDD is to invoke upon completion of
* the FCP operation. req argument is the pointer to the original
* FCP IO operation.
* @private: pointer to memory allocated alongside the FCP operation
* request structure that is specifically for the LLDD to use
* while processing the operation. The length of the buffer
* corresponds to the fcprqst_priv_sz value specified in the
* nvme_fc_port_template supplied by the LLDD.
*
* Values set by the LLDD indicating completion status of the FCP operation.
* Must be set prior to calling the done() callback.
* @transferred_length: amount of payload data, in bytes, that were
* transferred. Should equal payload_length on success.
* @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
* @status: Completion status of the FCP operation. Must be 0 upon success,
* NVME_SC_FC_xxx value upon failure. Note: this is NOT a
* reflection of the NVME CQE completion status. Only the status
* of the FCP operation at the NVME-FC level.
*/
struct nvmefc_fcp_req {
void *cmdaddr;
void *rspaddr;
dma_addr_t cmddma;
dma_addr_t rspdma;
u16 cmdlen;
u16 rsplen;
u32 payload_length;
struct sg_table sg_table;
struct scatterlist *first_sgl;
int sg_cnt;
enum nvmefc_fcp_datadir io_dir;
__le16 sqid;
void (*done)(struct nvmefc_fcp_req *req);
void *private;
u32 transferred_length;
u16 rcv_rsplen;
u32 status;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
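
To make the completion contract concrete, a hedged sketch of how an LLDD
might finish one of these requests. The helper and its arguments are
hypothetical; only the nvmefc_fcp_req field usage follows the definitions
above, and NVME_SC_FC_TRANSPORT_ERROR is the transport status code this
same series adds to include/linux/nvme.h.

static void lldd_fcp_io_complete(struct nvmefc_fcp_req *req,
				 u32 bytes_xfrd, u16 rsp_iu_len, bool failed)
{
	/* completion fields must be set before invoking done() */
	req->transferred_length = bytes_xfrd;	/* should equal payload_length */
	req->rcv_rsplen = rsp_iu_len;		/* length of received FCP RSP IU */
	req->status = failed ? NVME_SC_FC_TRANSPORT_ERROR : 0;

	/* hand the io back to the nvme-fc transport */
	req->done(req);
}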
/*
* Direct copy of fc_port_state enum. For later merging
*/
enum nvme_fc_obj_state {
FC_OBJSTATE_UNKNOWN,
FC_OBJSTATE_NOTPRESENT,
FC_OBJSTATE_ONLINE,
FC_OBJSTATE_OFFLINE, /* User has taken Port Offline */
FC_OBJSTATE_BLOCKED,
FC_OBJSTATE_BYPASSED,
FC_OBJSTATE_DIAGNOSTICS,
FC_OBJSTATE_LINKDOWN,
FC_OBJSTATE_ERROR,
FC_OBJSTATE_LOOPBACK,
FC_OBJSTATE_DELETED,
};
/**
* struct nvme_fc_local_port - structure used between NVME-FC transport and
* a LLDD to reference a local NVME host port.
* Allocated/created by the nvme_fc_register_localport()
* transport interface.
*
* Fields with static values for the port. Initialized by the
* port_info struct supplied to the registration call.
* @port_num: NVME-FC transport host port number
* @port_role: NVME roles supported on the port (see FC_PORT_ROLE_xxx)
* @node_name: FC WWNN for the port
* @port_name: FC WWPN for the port
* @private: pointer to memory allocated alongside the local port
* structure that is specifically for the LLDD to use.
* The length of the buffer corresponds to the local_priv_sz
* value specified in the nvme_fc_port_template supplied by
* the LLDD.
*
* Fields with dynamic values. Values may change based on link state. LLDD
* may reference fields directly to change them. Initialized by the
* port_info struct supplied to the registration call.
* @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
* be set to 0.
* @port_state: Operational state of the port.
*/
struct nvme_fc_local_port {
/* static/read-only fields */
u32 port_num;
u32 port_role;
u64 node_name;
u64 port_name;
void *private;
/* dynamic fields */
u32 port_id;
enum nvme_fc_obj_state port_state;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
/**
* struct nvme_fc_remote_port - structure used between NVME-FC transport and
* a LLDD to reference a remote NVME subsystem port.
* Allocated/created by the nvme_fc_register_remoteport()
* transport interface.
*
* Fields with static values for the port. Initialized by the
* port_info struct supplied to the registration call.
* @port_num: NVME-FC transport remote subsystem port number
* @port_role: NVME roles supported on the port (see FC_PORT_ROLE_xxx)
* @node_name: FC WWNN for the port
* @port_name: FC WWPN for the port
* @localport: pointer to the NVME-FC local host port the subsystem is
* connected to.
* @private: pointer to memory allocated alongside the remote port
* structure that is specifically for the LLDD to use.
* The length of the buffer corresponds to the remote_priv_sz
* value specified in the nvme_fc_port_template supplied by
* the LLDD.
*
* Fields with dynamic values. Values may change based on link or login
* state. LLDD may reference fields directly to change them. Initialized by
* the port_info struct supplied to the registration call.
* @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
* be set to 0.
* @port_state: Operational state of the remote port. Valid values are
* ONLINE or UNKNOWN.
*/
struct nvme_fc_remote_port {
/* static fields */
u32 port_num;
u32 port_role;
u64 node_name;
u64 port_name;
struct nvme_fc_local_port *localport;
void *private;
/* dynamic fields */
u32 port_id;
enum nvme_fc_obj_state port_state;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
/**
* struct nvme_fc_port_template - structure containing static entrypoints and
* operational parameters for an LLDD that supports NVME host
* behavior. Passed by reference in port registrations.
* NVME-FC transport remembers template reference and may
* access it during runtime operation.
*
* Host/Initiator Transport Entrypoints/Parameters:
*
* @localport_delete: The LLDD initiates deletion of a localport via
* nvme_fc_deregister_localport(). However, the teardown is
* asynchronous. This routine is called upon the completion of the
* teardown to inform the LLDD that the localport has been deleted.
* Entrypoint is Mandatory.
*
* @remoteport_delete: The LLDD initiates deletion of a remoteport via
* nvme_fc_deregister_remoteport(). However, the teardown is
* asynchronous. This routine is called upon the completion of the
* teardown to inform the LLDD that the remoteport has been deleted.
* Entrypoint is Mandatory.
*
* @create_queue: Upon creating a host<->controller association, queues are
* created such that they can be affinitized to cpus/cores. This
* callback notifies the LLDD that a controller queue is being
* created. The LLDD may choose to allocate an associated hw queue
* or map it onto a shared hw queue. Upon return from the call, the
* LLDD specifies a handle that will be given back to it for any
* command that is posted to the controller queue. The handle can
* be used by the LLDD to map quickly to the proper hw queue for
* command execution. The mask of cpu's that will map to this queue
* at the block-level is also passed in. The LLDD should use the
* queue id and/or cpu masks to ensure proper affinitization of the
* controller queue to the hw queue.
* Entrypoint is Optional.
*
* @delete_queue: This is the inverse of the create_queue. During
* host<->controller association teardown, this routine is called
* when a controller queue is being terminated. Any association with
* a hw queue should be terminated. If there is a unique hw queue, the
* hw queue should be torn down.
* Entrypoint is Optional.
*
* @poll_queue: Called to poll for the completion of an io on a blk queue.
* Entrypoint is Optional.
*
* @ls_req: Called to issue a FC-NVME FC-4 LS service request.
* The nvme_fc_ls_req structure will fully describe the buffers for
* the request payload and where to place the response payload. The
* LLDD is to allocate an exchange, issue the LS request, obtain the
* LS response, and call the "done" routine specified in the request
* structure (argument to done is the ls request structure itself).
* Entrypoint is Mandatory.
*
* @fcp_io: called to issue a FC-NVME I/O request. The I/O may be for
* an admin queue or an i/o queue. The nvmefc_fcp_req structure will
* fully describe the io: the buffer containing the FC-NVME CMD IU
* (which contains the SQE), the sg list for the payload if applicable,
* and the buffer to place the FC-NVME RSP IU into. The LLDD will
* complete the i/o, indicating the amount of data transferred or
* any transport error, and call the "done" routine specified in the
* request structure (argument to done is the fcp request structure
* itself).
* Entrypoint is Mandatory.
*
* @ls_abort: called to request the LLDD to abort the indicated ls request.
* The call may return before the abort has completed. After aborting
* the request, the LLDD must still call the ls request done routine
* indicating an FC transport Aborted status.
* Entrypoint is Mandatory.
*
* @fcp_abort: called to request the LLDD to abort the indicated fcp request.
* The call may return before the abort has completed. After aborting
* the request, the LLDD must still call the fcp request done routine
* indicating an FC transport Aborted status.
* Entrypoint is Mandatory.
*
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
*
* @max_sgl_segments: indicates the maximum number of sgl segments supported
* by the LLDD
* Value is Mandatory. Must be at least 1. Recommend at least 256.
*
* @max_dif_sgl_segments: indicates the maximum number of sgl segments
* supported by the LLDD for DIF operations.
* Value is Mandatory. Must be at least 1. Recommend at least 256.
*
* @dma_boundary: indicates the dma address boundary where dma mappings
* will be split across.
* Value is Mandatory. Typical value is 0xFFFFFFFF to split across
* 4Gig address boundaries.
*
* @local_priv_sz: The LLDD sets this field to the amount of additional
* memory that it would like fc nvme layer to allocate on the LLDD's
* behalf whenever a localport is allocated. The additional memory
* area is solely for the use of the LLDD and its location is specified by
* the localport->private pointer.
* Value is Mandatory. Allowed to be zero.
*
* @remote_priv_sz: The LLDD sets this field to the amount of additional
* memory that it would like fc nvme layer to allocate on the LLDD's
* behalf whenever a remoteport is allocated. The additional memory
* area is solely for the use of the LLDD and its location is specified by
* the remoteport->private pointer.
* Value is Mandatory. Allowed to be zero.
*
* @lsrqst_priv_sz: The LLDD sets this field to the amount of additional
* memory that it would like fc nvme layer to allocate on the LLDD's
* behalf whenever a ls request structure is allocated. The additional
* memory area is solely for the use of the LLDD and its location is
* specified by the ls_request->private pointer.
* Value is Mandatory. Allowed to be zero.
*
* @fcprqst_priv_sz: The LLDD sets this field to the amount of additional
* memory that it would like fc nvme layer to allocate on the LLDD's
* behalf whenever a fcp request structure is allocated. The additional
* memory area is solely for the use of the LLDD and its location is
* specified by the fcp_request->private pointer.
* Value is Mandatory. Allowed to be zero.
*/
struct nvme_fc_port_template {
/* initiator-based functions */
void (*localport_delete)(struct nvme_fc_local_port *);
void (*remoteport_delete)(struct nvme_fc_remote_port *);
int (*create_queue)(struct nvme_fc_local_port *,
unsigned int qidx, u16 qsize,
void **handle);
void (*delete_queue)(struct nvme_fc_local_port *,
unsigned int qidx, void *handle);
void (*poll_queue)(struct nvme_fc_local_port *, void *handle);
int (*ls_req)(struct nvme_fc_local_port *,
struct nvme_fc_remote_port *,
struct nvmefc_ls_req *);
int (*fcp_io)(struct nvme_fc_local_port *,
struct nvme_fc_remote_port *,
void *hw_queue_handle,
struct nvmefc_fcp_req *);
void (*ls_abort)(struct nvme_fc_local_port *,
struct nvme_fc_remote_port *,
struct nvmefc_ls_req *);
void (*fcp_abort)(struct nvme_fc_local_port *,
struct nvme_fc_remote_port *,
void *hw_queue_handle,
struct nvmefc_fcp_req *);
u32 max_hw_queues;
u16 max_sgl_segments;
u16 max_dif_sgl_segments;
u64 dma_boundary;
/* sizes of additional private data for data structures */
u32 local_priv_sz;
u32 remote_priv_sz;
u32 lsrqst_priv_sz;
u32 fcprqst_priv_sz;
};
/*
* Initiator/Host functions
*/
int nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
struct nvme_fc_port_template *template,
struct device *dev,
struct nvme_fc_local_port **lport_p);
int nvme_fc_unregister_localport(struct nvme_fc_local_port *localport);
int nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
struct nvme_fc_port_info *pinfo,
struct nvme_fc_remote_port **rport_p);
int nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *remoteport);
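
To illustrate the host-side registration flow, a hedged sketch of an LLDD
hooking into the transport. Everything prefixed foo_ is hypothetical; the
template fields and the nvme_fc_register_localport() signature come from
this header, and the private-data sizes are left zero, which the template
documentation explicitly allows.

struct foo_hba {			/* hypothetical LLDD state */
	struct device *dev;
	u64 wwnn;
	u64 wwpn;
	u32 d_id;
	struct nvme_fc_local_port *lport;
};

static struct nvme_fc_port_template foo_fc_nvme_template = {
	.localport_delete	= foo_localport_delete,
	.remoteport_delete	= foo_remoteport_delete,
	.ls_req			= foo_ls_req,
	.fcp_io			= foo_fcp_io,
	.ls_abort		= foo_ls_abort,
	.fcp_abort		= foo_fcp_abort,
	.max_hw_queues		= 4,
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,	/* split mappings at 4Gig */
	.local_priv_sz		= 0,
	.remote_priv_sz		= 0,
	.lsrqst_priv_sz		= 0,
	.fcprqst_priv_sz	= 0,
};

static int foo_register_nvme_port(struct foo_hba *hba)
{
	struct nvme_fc_port_info pinfo = {
		.node_name	= hba->wwnn,
		.port_name	= hba->wwpn,
		.port_role	= FC_PORT_ROLE_NVME_INITIATOR,
		.port_id	= hba->d_id,	/* upper 8 bits must be 0 */
	};

	return nvme_fc_register_localport(&pinfo, &foo_fc_nvme_template,
					  hba->dev, &hba->lport);
}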
/*
* *************** LLDD FC-NVME Target/Subsystem API ***************
*
* For FC LLDDs that implement the NVME Subsystem role
*
* ******************************************************************
*/
/**
* struct nvmet_fc_port_info - port-specific ids and FC connection-specific
* data element used during NVME Subsystem role
* registrations
*
* Static fields describing the port being registered:
* @node_name: FC WWNN for the port
* @port_name: FC WWPN for the port
*
* Initialization values for dynamic port fields:
* @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
* be set to 0.
*/
struct nvmet_fc_port_info {
u64 node_name;
u64 port_name;
u32 port_id;
};
/**
* struct nvmefc_tgt_ls_req - Structure used between LLDD and NVMET-FC
* layer to represent the exchange context for
* a FC-NVME Link Service (LS).
*
* The structure is allocated by the LLDD whenever a LS Request is received
* from the FC link. The address of the structure is passed to the nvmet-fc
* layer via the nvmet_fc_rcv_ls_req() call. The address of the structure
* will be passed back to the LLDD when the response is to be transmitted.
* The LLDD is to use the address to map back to the LLDD exchange structure
* which maintains information such as the targetport the LS was received
* on, the remote FC NVME initiator that sent the LS, and any FC exchange
* context. Upon completion of the LS response transmit, the address of the
* structure will be passed back to the LS rsp done() routine, allowing the
* nvmet-fc layer to release dma resources. Upon completion of the done()
* routine, no further access will be made by the nvmet-fc layer and the
* LLDD can de-allocate the structure.
*
* Field initialization:
* At the time of the nvmet_fc_rcv_ls_req() call, there is no content that
* is valid in the structure.
*
* When the structure is used for the LLDD->xmt_ls_rsp() call, the nvmet-fc
* layer will fully set the fields in order to specify the response
* payload buffer and its length as well as the done routine to be called
* upon completion of the transmit. The nvmet-fc layer will also set a
* private pointer for its own use in the done routine.
*
* Values set by the NVMET-FC layer prior to calling the LLDD xmt_ls_rsp
* entrypoint.
* @rspbuf: pointer to the LS response buffer
* @rspdma: PCI DMA address of the LS response buffer
* @rsplen: Length, in bytes, of the LS response buffer
* @done: The callback routine the LLDD is to invoke upon completion of
* transmitting the LS response. req argument is the pointer to
* the original ls request.
* @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used
* as part of the NVMET-FC processing. The LLDD is not to access
* this pointer.
*/
struct nvmefc_tgt_ls_req {
void *rspbuf;
dma_addr_t rspdma;
u16 rsplen;
void (*done)(struct nvmefc_tgt_ls_req *req);
void *nvmet_fc_private; /* LLDD is not to access !! */
};
/* Operations that NVME-FC layer may request the LLDD to perform for FCP */
enum {
NVMET_FCOP_READDATA = 1, /* xmt data to initiator */
NVMET_FCOP_WRITEDATA = 2, /* xmt data from initiator */
NVMET_FCOP_READDATA_RSP = 3, /* xmt data to initiator and send
* rsp as well
*/
NVMET_FCOP_RSP = 4, /* send rsp frame */
NVMET_FCOP_ABORT = 5, /* abort exchange via ABTS */
NVMET_FCOP_BA_ACC = 6, /* send BA_ACC */
NVMET_FCOP_BA_RJT = 7, /* send BA_RJT */
};
/**
* struct nvmefc_tgt_fcp_req - Structure used between LLDD and NVMET-FC
* layer to represent the exchange context and
* the specific FC-NVME IU operation(s) to perform
* for a FC-NVME FCP IO.
*
* Structure used between LLDD and nvmet-fc layer to represent the exchange
* context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related
* memory transfers, and its associated cqe transfer).
*
* The structure is allocated by the LLDD whenever a FCP CMD IU is received
* from the FC link. The address of the structure is passed to the nvmet-fc
* layer via the nvmet_fc_rcv_fcp_req() call. The address of the structure
* will be passed back to the LLDD for the data operations and transmit of
* the response. The LLDD is to use the address to map back to the LLDD
* exchange structure which maintains information such as the targetport
* the FCP I/O was received on, the remote FC NVME initiator that sent the
* FCP I/O, and any FC exchange context. Upon completion of the FCP target
* operation, the address of the structure will be passed back to the FCP
* op done() routine, allowing the nvmet-fc layer to release dma resources.
* Upon completion of the done() routine for either RSP or ABORT ops, no
* further access will be made by the nvmet-fc layer and the LLDD can
* de-allocate the structure.
*
* Field initialization:
* At the time of the nvmet_fc_rcv_fcp_req() call, there is no content that
* is valid in the structure.
*
* When the structure is used for an FCP target operation, the nvmet-fc
* layer will fully set the fields in order to specify the scattergather
* list, the transfer length, as well as the done routine to be called
* upon completion of the operation. The nvmet-fc layer will also set a
* private pointer for its own use in the done routine.
*
* Note: the LLDD must never fail a NVMET_FCOP_ABORT request !!
*
* Values set by the NVMET-FC layer prior to calling the LLDD fcp_op
* entrypoint.
* @op: Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx)
* @hwqid: Specifies the hw queue index (0..N-1, where N is the
* max_hw_queues value from the LLDD's nvmet_fc_target_template)
* that the operation is to use.
* @offset: Indicates the DATA_OUT/DATA_IN payload offset to be transferred.
* Field is only valid on WRITEDATA, READDATA, or READDATA_RSP ops.
* @timeout: amount of time, in seconds, to wait for a response from the NVME
* host. A value of 0 is an infinite wait.
* Valid only for the following ops:
* WRITEDATA: caps the wait for data reception
* READDATA_RSP & RSP: caps wait for FCP_CONF reception (if used)
* @transfer_length: the length, in bytes, of the DATA_OUT or DATA_IN payload
* that is to be transferred.
* Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
* @ba_rjt: Contains the BA_RJT payload that is to be transferred.
* Valid only for the NVMET_FCOP_BA_RJT op.
* @sg: Scatter/gather list for the DATA_OUT/DATA_IN payload data.
* Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
* @sg_cnt: Number of valid entries in the scatter/gather list.
* Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
* @rspaddr: pointer to the FCP RSP IU buffer to be transmitted
* Used by RSP and READDATA_RSP ops
* @rspdma: PCI DMA address of the FCP RSP IU buffer
* Used by RSP and READDATA_RSP ops
* @rsplen: Length, in bytes, of the FCP RSP IU buffer
* Used by RSP and READDATA_RSP ops
* @done: The callback routine the LLDD is to invoke upon completion of
* the operation. req argument is the pointer to the original
* FCP subsystem op request.
* @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used
* as part of the NVMET-FC processing. The LLDD is not to
* reference this field.
*
* Values set by the LLDD indicating completion status of the FCP operation.
* Must be set prior to calling the done() callback.
* @transferred_length: amount of DATA_OUT payload data received by
* a WRITEDATA operation. If not a WRITEDATA operation, value must
* be set to 0. Should equal transfer_length on success.
* @fcp_error: status of the FCP operation. Must be 0 on success; on failure
* must be a NVME_SC_FC_xxxx value.
*/
struct nvmefc_tgt_fcp_req {
u8 op;
u16 hwqid;
u32 offset;
u32 timeout;
u32 transfer_length;
struct fc_ba_rjt ba_rjt;
struct scatterlist sg[NVME_FC_MAX_SEGMENTS];
int sg_cnt;
void *rspaddr;
dma_addr_t rspdma;
u16 rsplen;
void (*done)(struct nvmefc_tgt_fcp_req *);
void *nvmet_fc_private; /* LLDD is not to access !! */
u32 transferred_length;
int fcp_error;
};
/* Target Features (Bit fields) LLDD supports */
enum {
NVMET_FCTGTFEAT_READDATA_RSP = (1 << 0),
/* Bit 0: supports the NVMET_FCOP_READDATA_RSP op, which
* sends (the last) Read Data sequence followed by the RSP
* sequence in one LLDD operation. Errors during Data
* sequence transmit must not allow RSP sequence to be sent.
*/
NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1),
/* Bit 1: When 0, the LLDD will deliver FCP CMD
* on the CPU it should be affinitized to. Thus work will
* be scheduled on the cpu received on. When 1, the LLDD
* may not deliver the CMD on the CPU it should be worked
* on. The transport should pick a cpu to schedule the work
* on.
*/
};
/**
* struct nvmet_fc_target_port - structure used between NVME-FC transport and
* a LLDD to reference a local NVME subsystem port.
* Allocated/created by the nvme_fc_register_targetport()
* transport interface.
*
* Fields with static values for the port. Initialized by the
* port_info struct supplied to the registration call.
* @port_num: NVME-FC transport subsystem port number
* @node_name: FC WWNN for the port
* @port_name: FC WWPN for the port
* @private: pointer to memory allocated alongside the local port
* structure that is specifically for the LLDD to use.
* The length of the buffer corresponds to the target_priv_sz
* value specified in the nvme_fc_target_template supplied by
* the LLDD.
*
* Fields with dynamic values. Values may change based on link state. LLDD
* may reference fields directly to change them. Initialized by the
* port_info struct supplied to the registration call.
* @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
* be set to 0.
* @port_state: Operational state of the port.
*/
struct nvmet_fc_target_port {
/* static/read-only fields */
u32 port_num;
u64 node_name;
u64 port_name;
void *private;
/* dynamic fields */
u32 port_id;
enum nvme_fc_obj_state port_state;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
/**
* struct nvmet_fc_target_template - structure containing static entrypoints
* and operational parameters for an LLDD that supports NVME
* subsystem behavior. Passed by reference in port
* registrations. NVME-FC transport remembers template
* reference and may access it during runtime operation.
*
* Subsystem/Target Transport Entrypoints/Parameters:
*
* @targetport_delete: The LLDD initiates deletion of a targetport via
* nvmet_fc_unregister_targetport(). However, the teardown is
* asynchronous. This routine is called upon the completion of the
* teardown to inform the LLDD that the targetport has been deleted.
* Entrypoint is Mandatory.
*
* @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service.
* The nvmefc_tgt_ls_req structure is the same LLDD-supplied exchange
* structure specified in the nvmet_fc_rcv_ls_req() call made when
* the LS request was received. The structure will fully describe
* the buffers for the response payload and the dma address of the
* payload. The LLDD is to transmit the response (or return a non-zero
* errno status), and upon completion of the transmit, call the
* "done" routine specified in the nvmefc_tgt_ls_req structure
* (argument to done is the ls request structure itself).
* After calling the done routine, the LLDD shall consider the
* LS handling complete and the nvmefc_tgt_ls_req structure may
* be freed/released.
* Entrypoint is Mandatory.
*
* @fcp_op: Called to perform a data transfer, transmit a response, or
* abort an FCP operation. The nvmefc_tgt_fcp_req structure is the same
* LLDD-supplied exchange structure specified in the
* nvmet_fc_rcv_fcp_req() call made when the FCP CMD IU was received.
* The op field in the structure shall indicate the operation for
* the LLDD to perform relative to the io.
* NVMET_FCOP_READDATA operation: the LLDD is to send the
* payload data (described by sglist) to the host in 1 or
* more FC sequences (preferably 1). Note: the fc-nvme layer
* may call the READDATA operation multiple times for longer
* payloads.
* NVMET_FCOP_WRITEDATA operation: the LLDD is to receive the
* payload data (described by sglist) from the host via 1 or
* more FC sequences (preferably 1). The LLDD is to generate
* the XFER_RDY IU(s) corresponding to the data being requested.
* Note: the FC-NVME layer may call the WRITEDATA operation
* multiple times for longer payloads.
* NVMET_FCOP_READDATA_RSP operation: the LLDD is to send the
* payload data (described by sglist) to the host in 1 or
* more FC sequences (preferably 1). If an error occurs during
* payload data transmission, the LLDD is to set the
* nvmefc_tgt_fcp_req fcp_error and transferred_length field, then
* consider the operation complete. On error, the LLDD is to not
* transmit the FCP_RSP iu. If all payload data is transferred
* successfully, the LLDD is to update the nvmefc_tgt_fcp_req
* transferred_length field and may subsequently transmit the
* FCP_RSP iu payload (described by rspbuf, rspdma, rsplen).
* The LLDD is to await FCP_CONF reception to confirm the RSP
* reception by the host. The LLDD may retransmit the FCP_RSP iu
* if necessary per FC-NVME. Upon reception of FCP_CONF, or upon
* FCP_CONF failure, the LLDD is to set the nvmefc_tgt_fcp_req
* fcp_error field and consider the operation complete.
* NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload
* (described by rspbuf, rspdma, rsplen). The LLDD is to await
* FCP_CONF reception to confirm the RSP reception by the host.
* The LLDD may retransmit the FCP_RSP iu if necessary per FC-NVME.
* Upon reception of FCP_CONF, or upon FCP_CONF failure, the
* LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
* consider the operation complete.
* NVMET_FCOP_ABORT: the LLDD is to terminate the exchange
* corresponding to the fcp operation. The LLDD shall send
* ABTS and follow FC exchange abort-multi rules, including
* ABTS retries and possible logout.
* Upon completing the indicated operation, the LLDD is to set the
* status fields for the operation (transferred_length and fcp_error
* status) in the request, then call the "done" routine
* indicated in the fcp request. Upon return from the "done"
* routine for either a NVMET_FCOP_RSP or NVMET_FCOP_ABORT operation
* the fc-nvme layer will no longer reference the fcp request,
* allowing the LLDD to free/release the fcp request.
* Note: when calling the done routine for READDATA or WRITEDATA
* operations, the fc-nvme layer may immediately convert, in the same
* thread and before returning to the LLDD, the fcp operation to
* the next operation for the fcp io and call the LLDD's fcp_op
* entrypoint again. If fields in the fcp request are to be accessed post
* the done call, the LLDD should save their values prior to calling
* the done routine, and inspect the saved values after the done
* routine.
* Returns 0 on success, -<errno> on failure (Ex: -EIO)
* Entrypoint is Mandatory.
*
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
*
* @max_sgl_segments: indicates the maximum number of sgl segments supported
* by the LLDD
* Value is Mandatory. Must be at least 1. Recommend at least 256.
*
* @max_dif_sgl_segments: indicates the maximum number of sgl segments
* supported by the LLDD for DIF operations.
* Value is Mandatory. Must be at least 1. Recommend at least 256.
*
* @dma_boundary: indicates the dma address boundary where dma mappings
* will be split across.
* Value is Mandatory. Typical value is 0xFFFFFFFF to split across
* 4Gig address boundaries.
*
* @target_features: The LLDD sets bits in this field to correspond to
* optional features that are supported by the LLDD.
* Refer to the NVMET_FCTGTFEAT_xxx values.
* Value is Mandatory. Allowed to be zero.
*
* @target_priv_sz: The LLDD sets this field to the amount of additional
* memory that it would like fc nvme layer to allocate on the LLDD's
* behalf whenever a targetport is allocated. The additional memory
* area is solely for the use of the LLDD and its location is specified by
* the targetport->private pointer.
* Value is Mandatory. Allowed to be zero.
*/
struct nvmet_fc_target_template {
void (*targetport_delete)(struct nvmet_fc_target_port *tgtport);
int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_ls_req *tls_req);
int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *);
u32 max_hw_queues;
u16 max_sgl_segments;
u16 max_dif_sgl_segments;
u64 dma_boundary;
u32 target_features;
u32 target_priv_sz;
};
int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo,
struct nvmet_fc_target_template *template,
struct device *dev,
struct nvmet_fc_target_port **tgtport_p);
int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport);
int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_ls_req *lsreq,
void *lsreqbuf, u32 lsreqbuf_len);
int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq,
void *cmdiubuf, u32 cmdiubuf_len);
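
A matching subsystem-side sketch, again hedged: all bar_ names are
hypothetical, while the template fields and the nvmet_fc_rcv_fcp_req()
call follow the declarations above. The LLDD registers a targetport once,
then hands every received FCP CMD IU to the transport together with its
per-exchange context.

struct bar_tport {			/* hypothetical per-port state */
	struct nvmet_fc_target_port *tgtport;
};

struct bar_exchange {			/* hypothetical per-exchange state */
	struct bar_tport *tport;
	struct nvmefc_tgt_fcp_req fcpreq;
};

static struct nvmet_fc_target_template bar_fc_tgt_template = {
	.targetport_delete	= bar_targetport_delete,
	.xmt_ls_rsp		= bar_xmt_ls_rsp,
	.fcp_op			= bar_fcp_op,
	.max_hw_queues		= 4,
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,
	.target_features	= NVMET_FCTGTFEAT_READDATA_RSP,
	.target_priv_sz		= sizeof(struct bar_tport),
};

/* called from the LLDD's receive path when a FCP CMD IU arrives */
static void bar_handle_fcp_cmd(struct bar_exchange *xchg,
			       void *cmdiubuf, u32 cmdiubuf_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(xchg->tport->tgtport, &xchg->fcpreq,
				   cmdiubuf, cmdiubuf_len);
	if (ret)
		pr_err("FCP CMD IU rejected by nvmet-fc: %d\n", ret);
}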
#endif /* _NVME_FC_DRIVER_H */

include/linux/nvme-fc.h (new file, 268 lines)

@@ -0,0 +1,268 @@
/*
* Copyright (c) 2016 Avago Technologies. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful.
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
* THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
* See the GNU General Public License for more details, a copy of which
* can be found in the file COPYING included with this package
*
*/
/*
* This file contains definitions relative to FC-NVME r1.11 and a few
* newer items
*/
#ifndef _NVME_FC_H
#define _NVME_FC_H 1
#define NVME_CMD_SCSI_ID 0xFD
#define NVME_CMD_FC_ID FC_TYPE_NVME
/* FC-NVME Cmd IU Flags */
#define FCNVME_CMD_FLAGS_DIRMASK 0x03
#define FCNVME_CMD_FLAGS_WRITE 0x01
#define FCNVME_CMD_FLAGS_READ 0x02
struct nvme_fc_cmd_iu {
__u8 scsi_id;
__u8 fc_id;
__be16 iu_len;
__u8 rsvd4[3];
__u8 flags;
__be64 connection_id;
__be32 csn;
__be32 data_len;
struct nvme_command sqe;
__be32 rsvd88[2];
};
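
For orientation, a sketch of how a host transport might fill the CMD IU
header from the constants above; the helper itself is hypothetical. Note
that iu_len is expressed in 32-bit words and flags carries one of the
FCNVME_CMD_FLAGS_xxx direction values.

static void init_cmd_iu_hdr(struct nvme_fc_cmd_iu *cmdiu, bool is_write)
{
	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
	cmdiu->flags = is_write ? FCNVME_CMD_FLAGS_WRITE :
				  FCNVME_CMD_FLAGS_READ;
}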
#define NVME_FC_SIZEOF_ZEROS_RSP 12
struct nvme_fc_ersp_iu {
__u8 rsvd0[2];
__be16 iu_len;
__be32 rsn;
__be32 xfrd_len;
__be32 rsvd12;
struct nvme_completion cqe;
/* for now - no additional payload */
};
/* FC-NVME r1.03/16-119v0 NVME Link Services */
enum {
FCNVME_LS_RSVD = 0,
FCNVME_LS_RJT = 1,
FCNVME_LS_ACC = 2,
FCNVME_LS_CREATE_ASSOCIATION = 3,
FCNVME_LS_CREATE_CONNECTION = 4,
FCNVME_LS_DISCONNECT = 5,
};
/* FC-NVME r1.03/16-119v0 NVME Link Service Descriptors */
enum {
FCNVME_LSDESC_RSVD = 0x0,
FCNVME_LSDESC_RQST = 0x1,
FCNVME_LSDESC_RJT = 0x2,
FCNVME_LSDESC_CREATE_ASSOC_CMD = 0x3,
FCNVME_LSDESC_CREATE_CONN_CMD = 0x4,
FCNVME_LSDESC_DISCONN_CMD = 0x5,
FCNVME_LSDESC_CONN_ID = 0x6,
FCNVME_LSDESC_ASSOC_ID = 0x7,
};
/* ********** start of Link Service Descriptors ********** */
/*
* Fills in the length of a descriptor: structure size minus descriptor header
*/
static inline __be32 fcnvme_lsdesc_len(size_t sz)
{
return cpu_to_be32(sz - (2 * sizeof(u32)));
}
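
For example (an illustrative, hypothetical initializer), sizing the
connection-id descriptor defined further below:

	struct fcnvme_lsdesc_conn_id cid = {
		.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID),
		.desc_len = fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id)),
	};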
struct fcnvme_ls_rqst_w0 {
u8 ls_cmd; /* FCNVME_LS_xxx */
u8 zeros[3];
};
/* FCNVME_LSDESC_RQST */
struct fcnvme_lsdesc_rqst {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
struct fcnvme_ls_rqst_w0 w0;
__be32 rsvd12;
};
/* FCNVME_LSDESC_RJT */
struct fcnvme_lsdesc_rjt {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
u8 rsvd8;
/*
* Reject reason and explanation codes are generic
* to ELS's from LS-3.
*/
u8 reason_code;
u8 reason_explanation;
u8 vendor;
__be32 rsvd12;
};
#define FCNVME_ASSOC_HOSTID_LEN 64
#define FCNVME_ASSOC_HOSTNQN_LEN 256
#define FCNVME_ASSOC_SUBNQN_LEN 256
/* FCNVME_LSDESC_CREATE_ASSOC_CMD */
struct fcnvme_lsdesc_cr_assoc_cmd {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
__be16 ersp_ratio;
__be16 rsvd10;
__be32 rsvd12[9];
__be16 cntlid;
__be16 sqsize;
__be32 rsvd52;
u8 hostid[FCNVME_ASSOC_HOSTID_LEN];
u8 hostnqn[FCNVME_ASSOC_HOSTNQN_LEN];
u8 subnqn[FCNVME_ASSOC_SUBNQN_LEN];
u8 rsvd632[384];
};
/* FCNVME_LSDESC_CREATE_CONN_CMD */
struct fcnvme_lsdesc_cr_conn_cmd {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
__be16 ersp_ratio;
__be16 rsvd10;
__be32 rsvd12[9];
__be16 qid;
__be16 sqsize;
__be32 rsvd52;
};
/* Disconnect Scope Values */
enum {
FCNVME_DISCONN_ASSOCIATION = 0,
FCNVME_DISCONN_CONNECTION = 1,
};
/* FCNVME_LSDESC_DISCONN_CMD */
struct fcnvme_lsdesc_disconn_cmd {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
u8 rsvd8[3];
/* note: scope is really a 1 bit field */
u8 scope; /* FCNVME_DISCONN_xxx */
__be32 rsvd12;
__be64 id;
};
/* FCNVME_LSDESC_CONN_ID */
struct fcnvme_lsdesc_conn_id {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
__be64 connection_id;
};
/* FCNVME_LSDESC_ASSOC_ID */
struct fcnvme_lsdesc_assoc_id {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
__be64 association_id;
};
/* r_ctl values */
enum {
FCNVME_RS_RCTL_DATA = 1,
FCNVME_RS_RCTL_XFER_RDY = 5,
FCNVME_RS_RCTL_RSP = 8,
};
/* ********** start of Link Services ********** */
/* FCNVME_LS_RJT */
struct fcnvme_ls_rjt {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_rqst rqst;
struct fcnvme_lsdesc_rjt rjt;
};
/* FCNVME_LS_ACC */
struct fcnvme_ls_acc_hdr {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_rqst rqst;
/* Followed by cmd-specific ACC descriptors, see next definitions */
};
/* FCNVME_LS_CREATE_ASSOCIATION */
struct fcnvme_ls_cr_assoc_rqst {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_cr_assoc_cmd assoc_cmd;
};
struct fcnvme_ls_cr_assoc_acc {
struct fcnvme_ls_acc_hdr hdr;
struct fcnvme_lsdesc_assoc_id associd;
struct fcnvme_lsdesc_conn_id connectid;
};
/* FCNVME_LS_CREATE_CONNECTION */
struct fcnvme_ls_cr_conn_rqst {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_assoc_id associd;
struct fcnvme_lsdesc_cr_conn_cmd connect_cmd;
};
struct fcnvme_ls_cr_conn_acc {
struct fcnvme_ls_acc_hdr hdr;
struct fcnvme_lsdesc_conn_id connectid;
};
/* FCNVME_LS_DISCONNECT */
struct fcnvme_ls_disconnect_rqst {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_assoc_id associd;
struct fcnvme_lsdesc_disconn_cmd discon_cmd;
};
struct fcnvme_ls_disconnect_acc {
struct fcnvme_ls_acc_hdr hdr;
};
/*
* Yet to be defined in FC-NVME:
*/
#define NVME_FC_CONNECT_TIMEOUT_SEC 2 /* 2 seconds */
#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */
#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */
#endif /* _NVME_FC_H */

include/linux/nvme.h

@@ -963,6 +963,19 @@ enum {
NVME_SC_ACCESS_DENIED = 0x286,
NVME_SC_DNR = 0x4000,
/*
* FC Transport-specific error status values for NVME commands
*
* Transport-specific status code values must be in the range 0xB0..0xBF
*/
/* Generic FC failure - catchall */
NVME_SC_FC_TRANSPORT_ERROR = 0x00B0,
/* I/O failure due to FC ABTS'd */
NVME_SC_FC_TRANSPORT_ABORTED = 0x00B1,
};
struct nvme_completion {

include/linux/parser.h

@@ -27,6 +27,7 @@ typedef struct {
int match_token(char *, const match_table_t table, substring_t args[]);
int match_int(substring_t *, int *result);
int match_u64(substring_t *, u64 *result);
int match_octal(substring_t *, int *result);
int match_hex(substring_t *, int *result);
bool match_wildcard(const char *pattern, const char *str);

include/uapi/scsi/fc/fc_fs.h

@@ -190,6 +190,7 @@ enum fc_fh_type {
FC_TYPE_FCP = 0x08, /* SCSI FCP */
FC_TYPE_CT = 0x20, /* Fibre Channel Services (FC-CT) */
FC_TYPE_ILS = 0x22, /* internal link service */
FC_TYPE_NVME = 0x28, /* FC-NVME */
};
/*
@@ -203,6 +204,7 @@ enum fc_fh_type {
[FC_TYPE_FCP] = "FCP", \
[FC_TYPE_CT] = "CT", \
[FC_TYPE_ILS] = "ILS", \
[FC_TYPE_NVME] = "NVME", \
}
/*

lib/parser.c

@@ -151,6 +151,36 @@ static int match_number(substring_t *s, int *result, int base)
return ret;
}
/**
* match_u64int: scan a number in the given base from a substring_t
* @s: substring to be scanned
* @result: resulting u64 on success
* @base: base to use when converting string
*
* Description: Given a &substring_t and a base, attempts to parse the substring
* as a number in that base. On success, sets @result to the integer represented
* by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
*/
static int match_u64int(substring_t *s, u64 *result, int base)
{
char *buf;
int ret;
u64 val;
size_t len = s->to - s->from;
buf = kmalloc(len + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, s->from, len);
buf[len] = '\0';
ret = kstrtoull(buf, base, &val);
if (!ret)
*result = val;
kfree(buf);
return ret;
}
/**
* match_int: - scan a decimal representation of an integer from a substring_t
* @s: substring_t to be scanned
@@ -166,6 +196,23 @@ int match_int(substring_t *s, int *result)
}
EXPORT_SYMBOL(match_int);
/**
* match_u64: - scan a decimal representation of a u64 from
* a substring_t
* @s: substring_t to be scanned
* @result: resulting unsigned long long on success
*
* Description: Attempts to parse the &substring_t @s as a long decimal
* integer. On success, sets @result to the integer represented by the
* string and returns 0.
* Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
*/
int match_u64(substring_t *s, u64 *result)
{
return match_u64int(s, result, 0);
}
EXPORT_SYMBOL(match_u64);
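
A hedged usage sketch for the new helper: the token table and the "wwnn="
option are invented for illustration, while match_token(), MAX_OPT_ARGS,
and substring_t are the existing parser.h interfaces. Base 0 in
match_u64int() lets kstrtoull() auto-detect decimal, octal, or hex input.

enum { OPT_WWNN, OPT_ERR };

static const match_table_t opt_tokens = {
	{ OPT_WWNN,	"wwnn=%s" },
	{ OPT_ERR,	NULL }
};

static int parse_wwnn_opt(char *str, u64 *wwnn)
{
	substring_t args[MAX_OPT_ARGS];

	if (match_token(str, opt_tokens, args) != OPT_WWNN)
		return -EINVAL;
	return match_u64(&args[0], wwnn);
}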
/**
* match_octal: - scan an octal representation of an integer from a substring_t
* @s: substring_t to be scanned