mirror of https://github.com/edk2-porting/linux-next.git
blk-mq: get rid of the cpumask in struct blk_mq_tags
Unused now that NVMe sets up irq affinity before calling into blk-mq.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 1b157939f9
parent b5af7f2ff0
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -665,11 +665,6 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
 	if (!tags)
 		return NULL;
 
-	if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
-		kfree(tags);
-		return NULL;
-	}
-
 	tags->nr_tags = total_tags;
 	tags->nr_reserved_tags = reserved_tags;
 
@@ -680,7 +675,6 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
 {
 	bt_free(&tags->bitmap_tags);
 	bt_free(&tags->breserved_tags);
-	free_cpumask_var(tags->cpumask);
 	kfree(tags);
 }
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -44,7 +44,6 @@ struct blk_mq_tags {
 	struct list_head page_list;
 
 	int alloc_policy;
-	cpumask_var_t cpumask;
 };
 
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1861,7 +1861,6 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 		hctx->tags = set->tags[i];
 		WARN_ON(!hctx->tags);
 
-		cpumask_copy(hctx->tags->cpumask, hctx->cpumask);
 		/*
 		 * Set the map size to the number of mapped software queues.
 		 * This is more accurate and more efficient than looping
@@ -2272,11 +2271,29 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	return 0;
 }
 
-struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags)
+static int blk_mq_create_mq_map(struct blk_mq_tag_set *set,
+		const struct cpumask *affinity_mask)
 {
-	return tags->cpumask;
+	int queue = -1, cpu = 0;
+
+	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
+			GFP_KERNEL, set->numa_node);
+	if (!set->mq_map)
+		return -ENOMEM;
+
+	if (!affinity_mask)
+		return 0;	/* map all cpus to queue 0 */
+
+	/* If cpus are offline, map them to first hctx */
+	for_each_online_cpu(cpu) {
+		if (cpumask_test_cpu(cpu, affinity_mask))
+			queue++;
+		if (queue >= 0)
+			set->mq_map[cpu] = queue;
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
 
 /*
  * Alloc a tag set to be associated with one or more request queues.
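For illustration only, below is a minimal user-space sketch of the cpu-to-queue assignment loop that blk_mq_create_mq_map() introduces in the hunk above. The fixed-size array mask, the NR_CPUS constant, and the main() harness are assumptions made for the demo; they stand in for the kernel's cpumask_test_cpu(), nr_cpu_ids, and for_each_online_cpu(). The loop gives each CPU present in the affinity mask a fresh queue and lets every other CPU share the queue of the nearest preceding masked CPU, with CPUs before the first masked CPU falling back to queue 0.

/*
 * User-space sketch (not kernel code) of the mapping logic in
 * blk_mq_create_mq_map(). The array-based "affinity mask" and
 * NR_CPUS are demo assumptions.
 */
#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	/* hypothetical affinity mask: one entry per CPU, 1 = in mask */
	int affinity[NR_CPUS] = { 1, 0, 1, 0, 1, 0, 1, 0 };
	unsigned int mq_map[NR_CPUS] = { 0 };
	int queue = -1, cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		/* each masked CPU starts a new hardware queue */
		if (affinity[cpu])
			queue++;
		/* unmasked CPUs share the most recent queue */
		if (queue >= 0)
			mq_map[cpu] = queue;
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> hw queue %u\n", cpu, mq_map[cpu]);
	return 0;
}

With the mask above the sketch prints cpus 0-1 on queue 0, 2-3 on queue 1, 4-5 on queue 2 and 6-7 on queue 3, mirroring how CPUs outside the affinity mask are grouped onto the nearest preceding hardware queue.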
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -201,7 +201,6 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
 		unsigned int flags, unsigned int hctx_idx);
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
-struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
 
 enum {
 	BLK_MQ_UNIQUE_TAG_BITS = 16,