nvme fixes for Linux 5.15:

 - keep ctrl->namespaces ordered (me)
 - fix incorrect h2cdata pdu offset accounting in nvme-tcp
   (Sagi Grimberg)
 - handle updated hw_queues in nvme-fc more carefully (Daniel Wagner,
   James Smart)
 -----BEGIN PGP SIGNATURE-----
 
 iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAmFNcZkLHGhjaEBsc3Qu
 ZGUACgkQD55TZVIEUYO5LQ//SD7S9poAxqPyHJ9n10hU7pz2+CcylgvKrfN/pJ68
 QEu8qDaMv8RCfyYmpkKvV+lbwa5yaX3e1E03KuGkwWF0ReLmQhMMkz32lWPdD96l
 V7/lorGc5sfOYCJXv9haTxyItegAZzTOp+h8Mdtz9hV0CJeCKQ57j0EQnCsacw4A
 c5nEg1hx7oHKWPTtPkTnmoWRcx1RndWYBpHPJY2nkRjVeLiE3I7mVFts6tDhnNwf
 zWRjY1MzhdBO4//XnXBsXnu37puJ/xhvL0WZH0lqNl6y6NVpdsEVBOFB5nwAihZP
 ThJqR4uU3ijpU/yNSuGl2VbR3klNkwTPpti1itsNyqLSowrW942aYVXFWarMBeQA
 a6hb2zht8vUM97uXtM5hVLvtUBhBHmoH4lJvxKumQ3NWsYJVUT2lQhIkVxFnDfsb
 b+Y1NicwXMrkSEquQfh+PIkCR3S7CGHz1eMB/Yr4w3yWtcNCTaMu0Go9AWR/EXjm
 mXSSWgyb+yWNPXYva3oaRItNpxNH5+EuX1dBAm+6HUT5lDdxF1KUd5g8Zo6qAVyA
 176i/lLVB0rhliNKgNwgyB4ciIgK2ICqArESR/mAMHQPtJKjDG9qShGGkw+wnPYS
 x5sL/AT8OPGvHdo83c+7csgsr0vJwdmrxOLZZ+GmqMSq9WWq1bMTJ7L2ACGwF0EY
 /Y4=
 =hiQa
 -----END PGP SIGNATURE-----

Merge tag 'nvme-5.15-2021-09-24' of git://git.infradead.org/nvme into block-5.15

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 5.15:

 - keep ctrl->namespaces ordered (me)
 - fix incorrect h2cdata pdu offset accounting in nvme-tcp
   (Sagi Grimberg)
 - handle updated hw_queues in nvme-fc more carefully (Daniel Wagner,
   James Smart)"

* tag 'nvme-5.15-2021-09-24' of git://git.infradead.org/nvme:
  nvme: keep ctrl->namespaces ordered
  nvme-tcp: fix incorrect h2cdata pdu offset accounting
  nvme-fc: remove freeze/unfreeze around update_nr_hw_queues
  nvme-fc: avoid race between time out and tear down
  nvme-fc: update hardware queues before using them
Jens Axboe 2021-09-24 07:15:21 -06:00
commit 5cad875691
3 changed files with 36 additions and 28 deletions

drivers/nvme/host/core.c

@@ -13,7 +13,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
-#include <linux/list_sort.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/pr.h>
@@ -3716,15 +3715,6 @@ out_unlock:
 	return ret;
 }
 
-static int ns_cmp(void *priv, const struct list_head *a,
-		const struct list_head *b)
-{
-	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
-	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
-
-	return nsa->head->ns_id - nsb->head->ns_id;
-}
-
 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
 	struct nvme_ns *ns, *ret = NULL;
@@ -3745,6 +3735,22 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 }
 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
 
+/*
+ * Add the namespace to the controller list while keeping the list ordered.
+ */
+static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
+{
+	struct nvme_ns *tmp;
+
+	list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
+		if (tmp->head->ns_id < ns->head->ns_id) {
+			list_add(&ns->list, &tmp->list);
+			return;
+		}
+	}
+	list_add(&ns->list, &ns->ctrl->namespaces);
+}
+
 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 		struct nvme_ns_ids *ids)
 {
@@ -3795,9 +3801,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 		goto out_unlink_ns;
 
 	down_write(&ctrl->namespaces_rwsem);
-	list_add_tail(&ns->list, &ctrl->namespaces);
+	nvme_ns_add_to_ctrl_list(ns);
 	up_write(&ctrl->namespaces_rwsem);
 	nvme_get_ctrl(ctrl);
 
 	if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
@@ -4080,10 +4085,6 @@ static void nvme_scan_work(struct work_struct *work)
 	if (nvme_scan_ns_list(ctrl) != 0)
 		nvme_scan_ns_sequential(ctrl);
 	mutex_unlock(&ctrl->scan_lock);
-
-	down_write(&ctrl->namespaces_rwsem);
-	list_sort(NULL, &ctrl->namespaces, ns_cmp);
-	up_write(&ctrl->namespaces_rwsem);
 }
 
 /*
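
The core.c change replaces the scan-then-sort scheme (list_sort() with ns_cmp() after every scan) with an insert that keeps ctrl->namespaces ordered as each namespace is added; since namespaces are typically probed in ascending NSID order, the reverse walk usually stops after a single comparison. The snippet below is a small userspace sketch of the same reverse-scan ordered insert, not driver code: the names (node, insert_ordered) and the hand-rolled circular doubly-linked list stand in for the kernel's list_head helpers.

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned int id;
	struct node *prev, *next;
};

/* circular doubly-linked list with a dummy head, like a kernel list_head */
static struct node head = { .prev = &head, .next = &head };

static void insert_ordered(struct node *new)
{
	struct node *pos;

	/* walk backwards; with mostly-ascending ids this ends immediately */
	for (pos = head.prev; pos != &head; pos = pos->prev)
		if (pos->id < new->id)
			break;

	/* link 'new' right after 'pos' (after the head if nothing is smaller) */
	new->prev = pos;
	new->next = pos->next;
	pos->next->prev = new;
	pos->next = new;
}

int main(void)
{
	unsigned int ids[] = { 1, 2, 5, 3, 4 };

	for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		struct node *n = calloc(1, sizeof(*n));

		n->id = ids[i];
		insert_ordered(n);
	}
	for (struct node *pos = head.next; pos != &head; pos = pos->next)
		printf("%u\n", pos->id);	/* prints 1 2 3 4 5 */
	return 0;
}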

drivers/nvme/host/fc.c

@@ -2487,6 +2487,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
 	 */
 	if (ctrl->ctrl.queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
+		nvme_sync_io_queues(&ctrl->ctrl);
 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
 				nvme_fc_terminate_exchange, &ctrl->ctrl);
 		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
@@ -2510,6 +2511,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
 	 * clean up the admin queue. Same thing as above.
 	 */
 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+	blk_sync_queue(ctrl->ctrl.admin_q);
 	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
 			nvme_fc_terminate_exchange, &ctrl->ctrl);
 	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
@@ -2951,6 +2953,13 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
 	if (ctrl->ctrl.queue_count == 1)
 		return 0;
 
+	if (prior_ioq_cnt != nr_io_queues) {
+		dev_info(ctrl->ctrl.device,
+			"reconnect: revising io queue count from %d to %d\n",
+			prior_ioq_cnt, nr_io_queues);
+		blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
+	}
+
 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_free_io_queues;
@@ -2959,15 +2968,6 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
 	if (ret)
 		goto out_delete_hw_queues;
 
-	if (prior_ioq_cnt != nr_io_queues) {
-		dev_info(ctrl->ctrl.device,
-			"reconnect: revising io queue count from %d to %d\n",
-			prior_ioq_cnt, nr_io_queues);
-		nvme_wait_freeze(&ctrl->ctrl);
-		blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
-		nvme_unfreeze(&ctrl->ctrl);
-	}
-
 	return 0;
 
 out_delete_hw_queues:
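
Taken together, the fc.c hunks move the blk_mq_update_nr_hw_queues() call ahead of creating and connecting the new association's I/O queues and drop the freeze/unfreeze pair around it, so the block layer's queue mapping is corrected before any request can be dispatched onto the reconnected queues. The following userspace sketch only illustrates that ordering hazard; it is not blk-mq or driver code, and every name in it (demo_queue, update_mapping, create_queues, dispatch) is invented for the example.

#include <stdio.h>

#define MAX_QUEUES 8

struct demo_queue { int live; };

static struct demo_queue queues[MAX_QUEUES];
static int mapped_queues;	/* what the dispatcher believes exists */

/* stand-in for updating the tag set's hardware queue count */
static void update_mapping(int nr)
{
	mapped_queues = nr;
}

/* stand-in for creating/connecting the association's hardware queues */
static void create_queues(int nr)
{
	for (int i = 0; i < MAX_QUEUES; i++)
		queues[i].live = i < nr;
}

static const char *dispatch(int tag)
{
	int q = tag % mapped_queues;	/* simplistic tag -> queue mapping */

	return queues[q].live ? "ok" : "BUG: request mapped to a dead queue";
}

int main(void)
{
	/* the old association had 8 I/O queues ... */
	update_mapping(8);
	create_queues(8);

	/* ... the reconnected one only offers 4 */
	create_queues(4);

	/* stale mapping used before it is updated */
	printf("before update: %s\n", dispatch(7));

	/* fixed ordering: shrink the mapping before the queues are used */
	update_mapping(4);
	printf("after update:  %s\n", dispatch(7));
	return 0;
}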

drivers/nvme/host/tcp.c

@@ -620,7 +620,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
 		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
 	data->ttag = pdu->ttag;
 	data->command_id = nvme_cid(rq);
-	data->data_offset = cpu_to_le32(req->data_sent);
+	data->data_offset = pdu->r2t_offset;
 	data->data_length = cpu_to_le32(req->pdu_len);
 	return 0;
 }
@@ -953,7 +953,15 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 			nvme_tcp_ddgst_update(queue->snd_hash, page,
 					offset, ret);
 
-		/* fully successful last write*/
+		/*
+		 * update the request iterator except for the last payload send
+		 * in the request where we don't want to modify it as we may
+		 * compete with the RX path completing the request.
+		 */
+		if (req->data_sent + ret < req->data_len)
+			nvme_tcp_advance_req(req, ret);
+
+		/* fully successful last send in current PDU */
 		if (last && ret == len) {
 			if (queue->data_digest) {
 				nvme_tcp_ddgst_final(queue->snd_hash,
@@ -965,7 +973,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 			}
 			return 1;
 		}
-		nvme_tcp_advance_req(req, ret);
 	}
 	return -EAGAIN;
 }
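
The second tcp.c hunk makes the send path advance the request iterator only while more payload remains: once the last chunk has been handed to the socket, the RX path may already be completing (and recycling) the request, so the TX side must not touch it again. Below is a minimal userspace sketch of that rule; demo_req, send_chunk and advance_req are illustrative names, not driver functions.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_req {
	size_t data_sent;	/* bytes already pushed to the wire */
	size_t data_len;	/* total payload length */
};

/* stand-in for advancing the send iterator (nvme_tcp_advance_req) */
static void advance_req(struct demo_req *req, size_t sent)
{
	req->data_sent += sent;
}

/* returns true when the chunk just sent carried the request's last byte */
static bool send_chunk(struct demo_req *req, size_t sent)
{
	bool last = req->data_sent + sent >= req->data_len;

	/*
	 * Only advance while more payload remains: after the last chunk the
	 * request must be treated as owned by the completion path.
	 */
	if (!last)
		advance_req(req, sent);
	return last;
}

int main(void)
{
	struct demo_req req = { .data_sent = 0, .data_len = 8192 };

	while (!send_chunk(&req, 4096))
		;
	printf("iterator left at %zu of %zu bytes\n",
	       req.data_sent, req.data_len);
	return 0;
}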