Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-12-21 10:05:00 +08:00
6a6dcae8f4
The default queue mapping builder, blk_mq_map_queues(), does not take NUMA topology into account, so the resulting mapping can be quite poor: CPUs belonging to different NUMA nodes get assigned to the same queue. It is observed that IOPS drops by ~30% when running two jobs on the same hctx of null_blk from two CPUs belonging to two NUMA nodes, compared with two CPUs on the same node.

Address the issue by reusing group_cpus_evenly() to build the queue mapping, since group_cpus_evenly() groups CPUs according to CPU/NUMA locality.

Performance also becomes more stable once the queue mapping respects NUMA locality. For example, on a two-node arm64 machine with 160 CPUs, node 0 (CPU 0~79) and node 1 (CPU 80~159):

1) modprobe null_blk nr_devices=1 submit_queues=2

2) run 'fio(t/io_uring -p 0 -n 4 -r 20 /dev/nullb0)' and observe that IOPS becomes much more stable across multiple runs:

- unpatched: IOPS is 2.5M ~ 4.5M
- patched: IOPS is 4.3M ~ 5.0M

Many drivers may benefit from the change, such as nvme pci poll, nvme tcp, ...

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Link: https://lore.kernel.org/r/20221227022905.352674-7-ming.lei@redhat.com
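For context on how drivers pick up this default builder (e.g. the nvme pci poll and nvme tcp cases mentioned above), here is a minimal, hypothetical sketch of a blk_mq_ops->map_queues callback. The mydrv_* names and queue counts are assumptions, not part of this patch, and the exact callback signature varies between kernel versions.

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: mydrv_* identifiers and queue counts are hypothetical. */
#include <linux/blk-mq.h>

#define MYDRV_NR_DEFAULT_QUEUES	2	/* e.g. submit_queues=2 as in the test above */
#define MYDRV_NR_POLL_QUEUES	2	/* assumed number of poll queues */

static void mydrv_map_queues(struct blk_mq_tag_set *set)
{
	struct blk_mq_queue_map *map;

	/* Regular (interrupt-driven) queues occupy the first hctx indices. */
	map = &set->map[HCTX_TYPE_DEFAULT];
	map->nr_queues = MYDRV_NR_DEFAULT_QUEUES;
	map->queue_offset = 0;
	blk_mq_map_queues(map);		/* CPUs are now grouped by NUMA locality */

	/* Poll queues have no IRQ affinity, so the default builder is used here too. */
	map = &set->map[HCTX_TYPE_POLL];
	map->nr_queues = MYDRV_NR_POLL_QUEUES;
	map->queue_offset = MYDRV_NR_DEFAULT_QUEUES;
	blk_mq_map_queues(map);
}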
58 lines
1.3 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/group_cpus.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	const struct cpumask *masks;
	unsigned int queue, cpu;

	masks = group_cpus_evenly(qmap->nr_queues);
	if (!masks) {
		/* Fallback: point every CPU at the first queue. */
		for_each_possible_cpu(cpu)
			qmap->mq_map[cpu] = qmap->queue_offset;
		return;
	}

	/* Each group shares CPU/NUMA locality; map each group to one queue. */
	for (queue = 0; queue < qmap->nr_queues; queue++) {
		for_each_cpu(cpu, &masks[queue])
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}
	kfree(masks);
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);

/**
 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
 * @qmap: CPU to hardware queue map.
 * @index: hardware queue index.
 *
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == qmap->mq_map[i])
			return cpu_to_node(i);
	}

	return NUMA_NO_NODE;
}
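The reverse lookup above feeds node-local allocations at queue init time. Below is a simplified sketch of how the block core can use it when setting up a hardware context; it is illustrative only, not a verbatim copy of blk-mq.c, and the helper name is made up.

// SPDX-License-Identifier: GPL-2.0
/* Simplified sketch: allocate per-hctx data on the node its CPUs belong to. */
#include <linux/blk-mq.h>
#include <linux/slab.h>
#include "blk-mq.h"	/* blk_mq_hw_queue_to_node() is block-layer internal */

static void *alloc_hctx_data_on_node(struct blk_mq_tag_set *set,
				     unsigned int hctx_idx, size_t size)
{
	int node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT],
					   hctx_idx);

	/* NUMA_NO_NODE means no CPU maps to this queue; let the allocator choose. */
	return kzalloc_node(size, GFP_KERNEL, node);
}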