Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-11-25 21:24:08 +08:00
nvme-pci: split nvme_dev_add
Split nvme_dev_add into a helper that actually allocates the tag set and one that just updates the number of queues. Add a local variable for the tag_set to clean up the code a bit.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 2455a4b778
parent f91b727ccf
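The idea of the split, in miniature: the old nvme_dev_add both configured and allocated the tag set on the first reset and merely updated the queue count on later resets, and the caller could not tell which path was taken. Below is a minimal, self-contained userspace C sketch of that same pattern (all names are hypothetical stand-ins, not kernel APIs): one helper for first-time setup, one for updating the queue count, and a caller that dispatches on whether the set has already been published, the way nvme_reset_work does after this patch.

#include <stdio.h>

/* Hypothetical stand-ins for struct blk_mq_tag_set and struct nvme_dev. */
struct demo_tag_set {
	unsigned int nr_hw_queues;
	unsigned int queue_depth;
};

struct demo_dev {
	struct demo_tag_set tagset;	/* embedded storage, like dev->tagset */
	struct demo_tag_set *live;	/* published pointer, like dev->ctrl.tagset */
	unsigned int online_queues;
};

/* First-time setup: configure the set and publish it (cf. nvme_pci_alloc_tag_set). */
static int demo_alloc_tag_set(struct demo_dev *dev)
{
	struct demo_tag_set *set = &dev->tagset;	/* local variable, as in the patch */

	set->nr_hw_queues = dev->online_queues - 1;
	set->queue_depth = 1024;
	dev->live = set;
	return 0;
}

/* Later resets only adjust the queue count (cf. nvme_pci_update_nr_queues). */
static void demo_update_nr_queues(struct demo_dev *dev)
{
	dev->tagset.nr_hw_queues = dev->online_queues - 1;
}

int main(void)
{
	struct demo_dev dev = { .online_queues = 5 };

	/* Caller-side dispatch, mirroring the nvme_reset_work hunk below. */
	if (!dev.live)
		demo_alloc_tag_set(&dev);
	else
		demo_update_nr_queues(&dev);

	printf("hw queues: %u\n", dev.tagset.nr_hw_queues);
	return 0;
}

The payoff is the same as in the patch itself: each helper has a single job, and the first-time-versus-resize decision is visible at the call site instead of being buried inside one function.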
drivers/nvme/host/pci.c

@@ -2533,47 +2533,45 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
 	return true;
 }
 
-static void nvme_dev_add(struct nvme_dev *dev)
+static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
 {
+	struct blk_mq_tag_set * set = &dev->tagset;
 	int ret;
 
-	if (!dev->ctrl.tagset) {
-		dev->tagset.ops = &nvme_mq_ops;
-		dev->tagset.nr_hw_queues = dev->online_queues - 1;
-		dev->tagset.nr_maps = 2; /* default + read */
-		if (dev->io_queues[HCTX_TYPE_POLL])
-			dev->tagset.nr_maps++;
-		dev->tagset.timeout = NVME_IO_TIMEOUT;
-		dev->tagset.numa_node = dev->ctrl.numa_node;
-		dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
-						BLK_MQ_MAX_DEPTH) - 1;
-		dev->tagset.cmd_size = sizeof(struct nvme_iod);
-		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
-		dev->tagset.driver_data = dev;
+	set->ops = &nvme_mq_ops;
+	set->nr_hw_queues = dev->online_queues - 1;
+	set->nr_maps = 2; /* default + read */
+	if (dev->io_queues[HCTX_TYPE_POLL])
+		set->nr_maps++;
+	set->timeout = NVME_IO_TIMEOUT;
+	set->numa_node = dev->ctrl.numa_node;
+	set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
+	set->cmd_size = sizeof(struct nvme_iod);
+	set->flags = BLK_MQ_F_SHOULD_MERGE;
+	set->driver_data = dev;
 
-		/*
-		 * Some Apple controllers requires tags to be unique
-		 * across admin and IO queue, so reserve the first 32
-		 * tags of the IO queue.
-		 */
-		if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
-			dev->tagset.reserved_tags = NVME_AQ_DEPTH;
+	/*
+	 * Some Apple controllers requires tags to be unique
+	 * across admin and IO queue, so reserve the first 32
+	 * tags of the IO queue.
+	 */
+	if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+		set->reserved_tags = NVME_AQ_DEPTH;
 
-		ret = blk_mq_alloc_tag_set(&dev->tagset);
-		if (ret) {
-			dev_warn(dev->ctrl.device,
-				"IO queues tagset allocation failed %d\n", ret);
-			return;
-		}
-		dev->ctrl.tagset = &dev->tagset;
-	} else {
-		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
-
-		/* Free previously allocated queues that are no longer usable */
-		nvme_free_queues(dev, dev->online_queues);
+	ret = blk_mq_alloc_tag_set(set);
+	if (ret) {
+		dev_warn(dev->ctrl.device,
+			"IO queues tagset allocation failed %d\n", ret);
+		return;
 	}
+	dev->ctrl.tagset = set;
+}
 
-	nvme_dbbuf_set(dev);
+static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+{
+	blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+	/* free previously allocated queues that are no longer usable */
+	nvme_free_queues(dev, dev->online_queues);
 }
 
 static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2924,7 +2922,11 @@ static void nvme_reset_work(struct work_struct *work)
 	} else {
 		nvme_start_queues(&dev->ctrl);
 		nvme_wait_freeze(&dev->ctrl);
-		nvme_dev_add(dev);
+		if (!dev->ctrl.tagset)
+			nvme_pci_alloc_tag_set(dev);
+		else
+			nvme_pci_update_nr_queues(dev);
+		nvme_dbbuf_set(dev);
 		nvme_unfreeze(&dev->ctrl);
 	}
 