mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-26 05:34:13 +08:00
nvme-pci: report the actual number of tagset maps
We've been reporting 2 maps regardless of whether the module parameter asked for anything beyond the default queues. A consequence of this is that blk-mq will reinitialize all the hardware contexts and io schedulers on every controller reset, even when the mapping is exactly the same as before. This unnecessary overhead is adding several milliseconds on a reset for environments that don't need it. Report the actual number of mappings in use. Signed-off-by: Keith Busch <kbusch@kernel.org> Signed-off-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
parent
61ce339f19
commit
6ee742fa8e
@@ -2526,9 +2526,11 @@ static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
 	set->ops = &nvme_mq_ops;
 	set->nr_hw_queues = dev->online_queues - 1;
-	set->nr_maps = 2; /* default + read */
+	set->nr_maps = 1;
+	if (dev->io_queues[HCTX_TYPE_READ])
+		set->nr_maps = 2;
 	if (dev->io_queues[HCTX_TYPE_POLL])
-		set->nr_maps = 3;
+		set->nr_maps++;
 	set->timeout = NVME_IO_TIMEOUT;
 	set->numa_node = dev->ctrl.numa_node;
 	set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
Loading…
Reference in New Issue
Block a user