Merge tag 'nvme-5.11-2021-01-07' of git://git.infradead.org/nvme into block-5.11

Pull NVMe updates from Christoph:

"nvme updates for 5.11:

 - fix a race in the nvme-tcp send code (Sagi Grimberg)
 - fix a list corruption in an nvme-rdma error path (Israel Rukshin)
 - avoid a possible double fetch in nvme-pci (Lalithambika Krishnakumar)
 - add the subsystem NQN quirk for a Samsung drive (Gopal Tiwari)
 - fix two compiler warnings in nvme-fcloop (James Smart)
 - don't call sleeping functions from irq context in nvme-fc (James Smart)
 - remove an unused argument (Max Gurtovoy)
 - remove unused exports (Minwoo Im)"

* tag 'nvme-5.11-2021-01-07' of git://git.infradead.org/nvme:
  nvme: remove the unused status argument from nvme_trace_bio_complete
  nvmet-rdma: Fix list_del corruption on queue establishment failure
  nvme: unexport functions with no external caller
  nvme: avoid possible double fetch in handling CQE
  nvme-tcp: Fix possible race of io_work and direct send
  nvme-pci: mark Samsung PM1725a as IGNORE_DEV_SUBNQN
  nvme-fcloop: Fix sscanf type and list_first_entry_or_null warnings
  nvme-fc: avoid calling _nvme_fc_abort_outstanding_ios from interrupt context
Commit 04b1ecb6a4 by Jens Axboe, 2021-01-07 10:57:54 -07:00
7 changed files with 50 additions and 21 deletions

drivers/nvme/host/core.c

@@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
 
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 {
         int ret;
@@ -192,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
         return ret;
 }
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
 
 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
 {
@@ -331,7 +330,7 @@ static inline void nvme_end_req(struct request *req)
                 req->__sector = nvme_lba_to_sect(req->q->queuedata,
                         le64_to_cpu(nvme_req(req)->result.u64));
-        nvme_trace_bio_complete(req, status);
+        nvme_trace_bio_complete(req);
         blk_mq_end_request(req, status);
 }
@@ -578,7 +577,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_request);
 
-struct request *nvme_alloc_request_qid(struct request_queue *q,
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
                 struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
 {
         struct request *req;
@@ -589,7 +588,6 @@ struct request *nvme_alloc_request_qid(struct request_queue *q,
                 nvme_init_request(req, cmd);
         return req;
 }
-EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
 
 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
 {

drivers/nvme/host/fc.c

@@ -166,6 +166,7 @@ struct nvme_fc_ctrl {
         struct blk_mq_tag_set   admin_tag_set;
         struct blk_mq_tag_set   tag_set;
 
+        struct work_struct      ioerr_work;
         struct delayed_work     connect_work;
 
         struct kref             ref;
@@ -1888,6 +1889,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
         }
 }
 
+static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+        struct nvme_fc_ctrl *ctrl =
+                container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+        nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
 static void
 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 {
@@ -2046,7 +2056,7 @@ done:
 check_error:
         if (terminate_assoc)
-                nvme_fc_error_recovery(ctrl, "transport detected io error");
+                queue_work(nvme_reset_wq, &ctrl->ioerr_work);
 }
 
 static int
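
A note on the hunks above: nvme_fc_fcpio_done() can run in interrupt context, where nvme_fc_error_recovery() is unsafe because it can end up calling sleeping functions, so the completion path now only queues ioerr_work on nvme_reset_wq and the recovery itself runs from process context. Below is a minimal userspace sketch of the same defer-to-worker hand-off, with a pthread worker standing in for the kernel workqueue; every name in it is illustrative, not kernel API.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int work_pending;

    /* Free to sleep, take mutexes, etc. -- runs only in worker context. */
    static void error_recovery(void)
    {
        printf("recovery runs in worker context\n");
    }

    static void *worker(void *unused)
    {
        pthread_mutex_lock(&lock);
        while (!work_pending)
            pthread_cond_wait(&cond, &lock);
        work_pending = 0;
        pthread_mutex_unlock(&lock);
        error_recovery();
        return NULL;
    }

    /* Analogue of the completion path: like queue_work(), it only flags
     * the work and wakes the worker; the heavy lifting happens elsewhere. */
    static void completion_path(void)
    {
        pthread_mutex_lock(&lock);
        work_pending = 1;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        completion_path();
        pthread_join(t, NULL);
        return 0;
    }
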
@@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 {
         struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
+        cancel_work_sync(&ctrl->ioerr_work);
         cancel_delayed_work_sync(&ctrl->connect_work);
         /*
          * kill the association on the link side. this will block
@@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
         INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
         INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+        INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
         spin_lock_init(&ctrl->lock);
 
         /* io queue count */
@@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 fail_ctrl:
         nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+        cancel_work_sync(&ctrl->ioerr_work);
         cancel_work_sync(&ctrl->ctrl.reset_work);
         cancel_delayed_work_sync(&ctrl->connect_work);

drivers/nvme/host/nvme.h

@@ -610,8 +610,6 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
                 struct nvme_command *cmd, blk_mq_req_flags_t flags);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
-                struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
 void nvme_cleanup_cmd(struct request *req);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                 struct nvme_command *cmd);
@@ -630,7 +628,6 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
 int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
 int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
@@ -675,8 +672,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
                 kblockd_schedule_work(&head->requeue_work);
 }
 
-static inline void nvme_trace_bio_complete(struct request *req,
-        blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
 {
         struct nvme_ns *ns = req->q->queuedata;
@@ -731,8 +727,7 @@ static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 {
 }
-static inline void nvme_trace_bio_complete(struct request *req,
-        blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
 {
 }
 static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,

drivers/nvme/host/pci.c

@@ -967,6 +967,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 {
         struct nvme_completion *cqe = &nvmeq->cqes[idx];
+        __u16 command_id = READ_ONCE(cqe->command_id);
         struct request *req;
 
         /*
@@ -975,17 +976,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
          * aborts. We don't even bother to allocate a struct request
          * for them but rather special case them here.
          */
-        if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+        if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
                 nvme_complete_async_event(&nvmeq->dev->ctrl,
                                 cqe->status, &cqe->result);
                 return;
         }
 
-        req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+        req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
         if (unlikely(!req)) {
                 dev_warn(nvmeq->dev->ctrl.device,
                         "invalid id %d completed on queue %d\n",
-                        cqe->command_id, le16_to_cpu(cqe->sq_id));
+                        command_id, le16_to_cpu(cqe->sq_id));
                 return;
         }
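
The READ_ONCE() added above matters because the CQE sits in memory the controller can still write via DMA: reading cqe->command_id once for the AEN check and again for the tag lookup is a double fetch, and the two reads may disagree. Snapshotting the value once closes that window. A userspace sketch of the hazard follows, with a second thread standing in for the device; this is an analogue with illustrative names, not driver code.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-in for the CQE field the controller keeps rewriting via DMA. */
    static _Atomic unsigned short command_id;

    static void *device(void *unused)
    {
        for (unsigned short i = 0; ; i++)
            atomic_store_explicit(&command_id, i, memory_order_relaxed);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, device, NULL);
        for (long i = 0; i < 100000000L; i++) {
            /* Buggy double fetch: validate one value... */
            unsigned short checked =
                atomic_load_explicit(&command_id, memory_order_relaxed);
            /* ...then use a second, possibly different, one. */
            unsigned short used =
                atomic_load_explicit(&command_id, memory_order_relaxed);
            if (checked != used) {
                printf("raced: checked %u, used %u\n", checked, used);
                return 0;
            }
        }
        /* The fix mirrors READ_ONCE(): load once into a local and use
         * that single snapshot for both the check and the lookup. */
        return 0;
    }

On multicore hardware this usually reports a mismatch almost immediately, which is exactly why the check and the use must share one snapshot.
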
@@ -3196,7 +3197,8 @@ static const struct pci_device_id nvme_id_table[] = {
         { PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
         { PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
-                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
         { PCI_DEVICE(0x1d1d, 0x1f1f),   /* LighNVM qemu device */
                 .driver_data = NVME_QUIRK_LIGHTNVM, },
         { PCI_DEVICE(0x1d1d, 0x2807),   /* CNEX WL */

drivers/nvme/host/tcp.c

@@ -262,6 +262,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
         }
 }
 
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+        int ret;
+
+        /* drain the send queue as much as we can... */
+        do {
+                ret = nvme_tcp_try_send(queue);
+        } while (ret > 0);
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                 bool sync, bool last)
 {
@@ -279,7 +289,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
         if (queue->io_cpu == smp_processor_id() &&
             sync && empty && mutex_trylock(&queue->send_mutex)) {
                 queue->more_requests = !last;
-                nvme_tcp_try_send(queue);
+                nvme_tcp_send_all(queue);
                 queue->more_requests = false;
                 mutex_unlock(&queue->send_mutex);
         } else if (last) {
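
What the new helper is for: when the direct-send path wins the mutex_trylock(), a concurrent io_work that lost the lock may assume this context will handle the queued requests, so sending only one request could strand the rest until a later submission. nvme_tcp_send_all() therefore loops until nvme_tcp_try_send() stops reporting progress. A standalone sketch of that drain-until-no-progress pattern, with illustrative names rather than the driver's types:

    #include <stdio.h>

    static int pending = 3;     /* stand-in for the driver's send queue */

    /* Mimics nvme_tcp_try_send(): >0 progress, 0 queue empty, <0 error. */
    static int try_send(void)
    {
        if (pending == 0)
            return 0;
        pending--;
        return 1;
    }

    /* The nvme_tcp_send_all() pattern: keep calling try_send() until it
     * stops making progress, so nothing is stranded on the queue. */
    static void send_all(void)
    {
        int ret;

        do {
            ret = try_send();
        } while (ret > 0);
    }

    int main(void)
    {
        send_all();
        printf("pending after drain: %d\n", pending);   /* prints 0 */
        return 0;
    }
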

drivers/nvme/target/fcloop.c

@@ -1501,7 +1501,8 @@ static ssize_t
 fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
 {
-        int opcode, starting, amount;
+        unsigned int opcode;
+        int starting, amount;
 
         if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
                 return -EBADRQC;
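
Background on this warning fix: the %x conversion in sscanf() stores through an unsigned int *, so scanning into a plain int is undefined behavior and is exactly what -Wformat complains about; splitting the declaration gives each conversion a correctly typed target. A standalone illustration using the same format string:

    #include <stdio.h>

    int main(void)
    {
        unsigned int opcode;    /* %x must store through unsigned int * */
        int starting, amount;   /* %d stores through int * */

        if (sscanf("1a:2:3", "%x:%d:%d", &opcode, &starting, &amount) != 3)
            return 1;
        printf("opcode=%#x starting=%d amount=%d\n",
               opcode, starting, amount);
        return 0;
    }
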
@@ -1588,8 +1589,8 @@ out_destroy_class:
 static void __exit fcloop_exit(void)
 {
-        struct fcloop_lport *lport;
-        struct fcloop_nport *nport;
+        struct fcloop_lport *lport = NULL;
+        struct fcloop_nport *nport = NULL;
         struct fcloop_tport *tport;
         struct fcloop_rport *rport;
         unsigned long flags;

drivers/nvme/target/rdma.c

@@ -1641,6 +1641,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
         spin_lock_irqsave(&queue->state_lock, flags);
         switch (queue->state) {
         case NVMET_RDMA_Q_CONNECTING:
+                while (!list_empty(&queue->rsp_wait_list)) {
+                        struct nvmet_rdma_rsp *rsp;
+
+                        rsp = list_first_entry(&queue->rsp_wait_list,
+                                               struct nvmet_rdma_rsp,
+                                               wait_list);
+                        list_del(&rsp->wait_list);
+                        nvmet_rdma_put_rsp(rsp);
+                }
+                fallthrough;
         case NVMET_RDMA_Q_LIVE:
                 queue->state = NVMET_RDMA_Q_DISCONNECTING;
                 disconnect = true;
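
What the added drain prevents: a queue that fails during establishment can still hold responses parked on rsp_wait_list, and tearing the queue down without releasing them leaves stale list entries behind that a later list_del() turns into the reported corruption. Releasing everything on the wait list before leaving the CONNECTING state avoids that. Below is a minimal userspace analogue of the drain-before-teardown idea, using a simplified singly linked list and hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for a response parked on rsp_wait_list. */
    struct rsp {
        struct rsp *next;
        int id;
    };

    static struct rsp *wait_list;   /* head of the parked responses */

    static void put_rsp(struct rsp *rsp)
    {
        printf("released rsp %d\n", rsp->id);
        free(rsp);
    }

    /* Analogue of the added hunk: on a failed connect, release everything
     * still parked before the queue goes away, so no later unlink touches
     * freed memory. */
    static void drain_wait_list(void)
    {
        while (wait_list) {
            struct rsp *rsp = wait_list;

            wait_list = rsp->next;
            put_rsp(rsp);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct rsp *rsp = malloc(sizeof(*rsp));

            rsp->id = i;
            rsp->next = wait_list;
            wait_list = rsp;
        }
        drain_wait_list();
        return 0;
    }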