blk-mq: Stop using pointers for blk_mq_tags bitmap tags
Now that we use shared tags for shared sbitmap support, we no longer need
the tags sbitmap pointers, so drop them.

This essentially reverts commit 222a5ae03c ("blk-mq: Use pointers for
blk_mq_tags bitmap tags").

Function blk_mq_init_bitmap_tags() is also removed, since it would now be
only a wrapper for blk_mq_init_bitmaps().
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/1633429419-228500-14-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e155b0c238
commit ae0f1a732f
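
For orientation, the struct-level effect of this change can be sketched in a
few lines of plain C. This is a hypothetical userspace illustration, not
kernel code: "struct sbq" and the main() harness are stand-ins invented here;
the real fields live in struct blk_mq_tags and the real bitmap type is
struct sbitmap_queue.

/*
 * Minimal sketch of the pointer-to-embedded-member conversion -- NOT kernel
 * code.  "struct sbq" stands in for the kernel's struct sbitmap_queue.
 */
#include <stdio.h>

struct sbq {
	unsigned int depth;	/* stand-in field */
};

/* Before: the bitmaps were reached through pointers, backed by the
 * double-underscore members (or by another tag set's bitmaps). */
struct tags_old {
	struct sbq *bitmap_tags;
	struct sbq *breserved_tags;
	struct sbq __bitmap_tags;
	struct sbq __breserved_tags;
};

/* After: the queues are embedded directly, with no indirection left. */
struct tags_new {
	struct sbq bitmap_tags;
	struct sbq breserved_tags;
};

int main(void)
{
	struct tags_old o = { 0 };
	struct tags_new n = { 0 };

	/* Old scheme: an extra init step aimed the pointers at the
	 * __-prefixed storage, and call sites passed the pointer. */
	o.bitmap_tags = &o.__bitmap_tags;
	o.bitmap_tags->depth = 64;
	printf("old: %u\n", o.bitmap_tags->depth);

	/* New scheme: call sites simply take the address of the embedded
	 * member, which is what every "tags->bitmap_tags" ->
	 * "&tags->bitmap_tags" hunk below does. */
	n.bitmap_tags.depth = 64;
	printf("new: %u\n", (&n.bitmap_tags)->depth);

	return 0;
}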
@@ -6884,8 +6884,8 @@ static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
 	struct blk_mq_tags *tags = hctx->sched_tags;
 	unsigned int min_shallow;
 
-	min_shallow = bfq_update_depths(bfqd, tags->bitmap_tags);
-	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, min_shallow);
+	min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
+	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
 }
 
 static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
@@ -453,11 +453,11 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m,
 		   atomic_read(&tags->active_queues));
 
 	seq_puts(m, "\nbitmap_tags:\n");
-	sbitmap_queue_show(tags->bitmap_tags, m);
+	sbitmap_queue_show(&tags->bitmap_tags, m);
 
 	if (tags->nr_reserved_tags) {
 		seq_puts(m, "\nbreserved_tags:\n");
-		sbitmap_queue_show(tags->breserved_tags, m);
+		sbitmap_queue_show(&tags->breserved_tags, m);
 	}
 }
 
@@ -488,7 +488,7 @@ static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
 	if (res)
 		goto out;
 	if (hctx->tags)
-		sbitmap_bitmap_show(&hctx->tags->bitmap_tags->sb, m);
+		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
 	mutex_unlock(&q->sysfs_lock);
 
 out:
@@ -522,7 +522,7 @@ static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
 	if (res)
 		goto out;
 	if (hctx->sched_tags)
-		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags->sb, m);
+		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
 	mutex_unlock(&q->sysfs_lock);
 
 out:
@@ -44,9 +44,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
  */
 void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 {
-	sbitmap_queue_wake_all(tags->bitmap_tags);
+	sbitmap_queue_wake_all(&tags->bitmap_tags);
 	if (include_reserve)
-		sbitmap_queue_wake_all(tags->breserved_tags);
+		sbitmap_queue_wake_all(&tags->breserved_tags);
 }
 
 /*
@@ -100,10 +100,10 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 			WARN_ON_ONCE(1);
 			return BLK_MQ_NO_TAG;
 		}
-		bt = tags->breserved_tags;
+		bt = &tags->breserved_tags;
 		tag_offset = 0;
 	} else {
-		bt = tags->bitmap_tags;
+		bt = &tags->bitmap_tags;
 		tag_offset = tags->nr_reserved_tags;
 	}
 
@@ -149,9 +149,9 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 						data->ctx);
 		tags = blk_mq_tags_from_data(data);
 		if (data->flags & BLK_MQ_REQ_RESERVED)
-			bt = tags->breserved_tags;
+			bt = &tags->breserved_tags;
 		else
-			bt = tags->bitmap_tags;
+			bt = &tags->bitmap_tags;
 
 		/*
 		 * If destination hw queue is changed, fake wake up on
@@ -185,10 +185,10 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
 		const int real_tag = tag - tags->nr_reserved_tags;
 
 		BUG_ON(real_tag >= tags->nr_tags);
-		sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu);
+		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
 	} else {
 		BUG_ON(tag >= tags->nr_reserved_tags);
-		sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu);
+		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
 	}
 }
 
@@ -339,9 +339,9 @@ static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
 	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);
 
 	if (tags->nr_reserved_tags)
-		bt_tags_for_each(tags, tags->breserved_tags, fn, priv,
+		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
 				 flags | BT_TAG_ITER_RESERVED);
-	bt_tags_for_each(tags, tags->bitmap_tags, fn, priv, flags);
+	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
 }
 
 /**
@@ -458,8 +458,8 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 			continue;
 
 		if (tags->nr_reserved_tags)
-			bt_for_each(hctx, tags->breserved_tags, fn, priv, true);
-		bt_for_each(hctx, tags->bitmap_tags, fn, priv, false);
+			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
+		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
 	}
 	blk_queue_exit(q);
 }
@@ -491,24 +491,6 @@ free_bitmap_tags:
 	return -ENOMEM;
 }
 
-static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
-				   int node, int alloc_policy)
-{
-	int ret;
-
-	ret = blk_mq_init_bitmaps(&tags->__bitmap_tags,
-				  &tags->__breserved_tags,
-				  tags->nr_tags, tags->nr_reserved_tags,
-				  node, alloc_policy);
-	if (ret)
-		return ret;
-
-	tags->bitmap_tags = &tags->__bitmap_tags;
-	tags->breserved_tags = &tags->__breserved_tags;
-
-	return 0;
-}
-
 struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
 				     unsigned int reserved_tags,
 				     int node, int alloc_policy)
@@ -528,7 +510,9 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
 	tags->nr_reserved_tags = reserved_tags;
 	spin_lock_init(&tags->lock);
 
-	if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) {
+	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
+				total_tags, reserved_tags, node,
+				alloc_policy) < 0) {
 		kfree(tags);
 		return NULL;
 	}
@@ -537,8 +521,8 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
 
 void blk_mq_free_tags(struct blk_mq_tags *tags)
 {
-	sbitmap_queue_free(tags->bitmap_tags);
-	sbitmap_queue_free(tags->breserved_tags);
+	sbitmap_queue_free(&tags->bitmap_tags);
+	sbitmap_queue_free(&tags->breserved_tags);
 	kfree(tags);
 }
 
@@ -587,7 +571,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 		 * Don't need (or can't) update reserved tags here, they
 		 * remain static and should never need resizing.
 		 */
-		sbitmap_queue_resize(tags->bitmap_tags,
+		sbitmap_queue_resize(&tags->bitmap_tags,
 				     tdepth - tags->nr_reserved_tags);
 	}
 
@@ -598,12 +582,12 @@ void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size)
 {
 	struct blk_mq_tags *tags = set->shared_sbitmap_tags;
 
-	sbitmap_queue_resize(&tags->__bitmap_tags, size - set->reserved_tags);
+	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
 }
 
 void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q)
 {
-	sbitmap_queue_resize(q->shared_sbitmap_tags->bitmap_tags,
+	sbitmap_queue_resize(&q->shared_sbitmap_tags->bitmap_tags,
 			     q->nr_requests - q->tag_set->reserved_tags);
 }
 
@@ -13,11 +13,8 @@ struct blk_mq_tags {
 
 	atomic_t active_queues;
 
-	struct sbitmap_queue *bitmap_tags;
-	struct sbitmap_queue *breserved_tags;
-
-	struct sbitmap_queue __bitmap_tags;
-	struct sbitmap_queue __breserved_tags;
+	struct sbitmap_queue bitmap_tags;
+	struct sbitmap_queue breserved_tags;
 
 	struct request **rqs;
 	struct request **static_rqs;
@@ -1071,14 +1071,14 @@ static inline unsigned int queued_to_index(unsigned int queued)
 
 static bool __blk_mq_get_driver_tag(struct request *rq)
 {
-	struct sbitmap_queue *bt = rq->mq_hctx->tags->bitmap_tags;
+	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
 	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
 	int tag;
 
 	blk_mq_tag_busy(rq->mq_hctx);
 
 	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
-		bt = rq->mq_hctx->tags->breserved_tags;
+		bt = &rq->mq_hctx->tags->breserved_tags;
 		tag_offset = 0;
 	} else {
 		if (!hctx_may_queue(rq->mq_hctx, bt))
@@ -1121,7 +1121,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 		struct sbitmap_queue *sbq;
 
 		list_del_init(&wait->entry);
-		sbq = hctx->tags->bitmap_tags;
+		sbq = &hctx->tags->bitmap_tags;
 		atomic_dec(&sbq->ws_active);
 	}
 	spin_unlock(&hctx->dispatch_wait_lock);
@@ -1139,7 +1139,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 				 struct request *rq)
 {
-	struct sbitmap_queue *sbq = hctx->tags->bitmap_tags;
+	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
 	struct wait_queue_head *wq;
 	wait_queue_entry_t *wait;
 	bool ret;
@@ -453,11 +453,11 @@ static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
 	struct blk_mq_tags *tags = hctx->sched_tags;
-	unsigned int shift = tags->bitmap_tags->sb.shift;
+	unsigned int shift = tags->bitmap_tags.sb.shift;
 
 	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
 
-	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth);
+	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
 }
 
 static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
@@ -567,7 +567,7 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
 
 	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
 
-	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
+	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
 }
 
 /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */