block: remove debugfs blk_mq_ctx dispatched/merged/completed attributes
These were added as part of early days debugging for blk-mq, and they are not
really useful anymore. Rather than spend cycles updating them, just get rid
of them.

As a bonus, this shrinks the per-cpu software queue size from 256b to 192b.
That's a whole cacheline less.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 128459062b
commit 9a14d6ce41
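For context on the "whole cacheline less" claim in the message above: rq_completed[2] is annotated ____cacheline_aligned_in_smp, so it always starts on a fresh 64-byte cacheline, and dropping it together with rq_dispatched[2] and rq_merged lets the remaining fields pack one cacheline tighter. The stand-alone sketch below is a toy model only, not the real struct blk_mq_ctx: the filler arrays before[16] and after[5] are invented so the arithmetic comes out to the 256b/192b quoted in the commit message, and GCC/Clang's __attribute__((aligned(64))) stands in for the kernel macro.

/*
 * Toy model only -- NOT the real struct blk_mq_ctx. The filler field sizes
 * (before[16], after[5]) are made up so the numbers reproduce the
 * 256 -> 192 byte shrink described in the commit message; the real layout
 * differs. __attribute__((aligned(CACHELINE))) stands in for the kernel's
 * ____cacheline_aligned_in_smp on a machine with 64-byte cachelines.
 */
#include <stdio.h>

#define CACHELINE 64

/*
 * Before: the cacheline-aligned counter array forces rq_completed onto the
 * next 64-byte boundary, padding the struct out to 4 cachelines (256 bytes).
 */
struct ctx_with_counters {
	unsigned long before[16];		/* stand-in for other ctx fields: 128 bytes */
	unsigned long rq_dispatched[2];		/* 16 bytes */
	unsigned long rq_merged;		/*  8 bytes */
	unsigned long rq_completed[2]		/* starts at the next 64-byte boundary */
		__attribute__((aligned(CACHELINE)));
	unsigned long after[5];			/* stand-in for remaining ctx fields: 40 bytes */
} __attribute__((aligned(CACHELINE)));

/*
 * After: with the counters gone, the same surrounding fields fit in
 * 3 cachelines (192 bytes).
 */
struct ctx_without_counters {
	unsigned long before[16];
	unsigned long after[5];
} __attribute__((aligned(CACHELINE)));

int main(void)
{
	printf("with counters:    %zu bytes (%zu cachelines)\n",
	       sizeof(struct ctx_with_counters),
	       sizeof(struct ctx_with_counters) / CACHELINE);
	printf("without counters: %zu bytes (%zu cachelines)\n",
	       sizeof(struct ctx_without_counters),
	       sizeof(struct ctx_without_counters) / CACHELINE);
	return 0;
}

Built with a typical x86-64 GCC or Clang toolchain, this prints 256 and 192 bytes; the padding of the real struct of course depends on its actual members and the architecture's cacheline size.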
@@ -663,57 +663,6 @@ CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
 CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
 CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
 
-static int ctx_dispatched_show(void *data, struct seq_file *m)
-{
-	struct blk_mq_ctx *ctx = data;
-
-	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
-	return 0;
-}
-
-static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
-				    size_t count, loff_t *ppos)
-{
-	struct blk_mq_ctx *ctx = data;
-
-	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
-	return count;
-}
-
-static int ctx_merged_show(void *data, struct seq_file *m)
-{
-	struct blk_mq_ctx *ctx = data;
-
-	seq_printf(m, "%lu\n", ctx->rq_merged);
-	return 0;
-}
-
-static ssize_t ctx_merged_write(void *data, const char __user *buf,
-				size_t count, loff_t *ppos)
-{
-	struct blk_mq_ctx *ctx = data;
-
-	ctx->rq_merged = 0;
-	return count;
-}
-
-static int ctx_completed_show(void *data, struct seq_file *m)
-{
-	struct blk_mq_ctx *ctx = data;
-
-	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
-	return 0;
-}
-
-static ssize_t ctx_completed_write(void *data, const char __user *buf,
-				   size_t count, loff_t *ppos)
-{
-	struct blk_mq_ctx *ctx = data;
-
-	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
-	return count;
-}
-
 static int blk_mq_debugfs_show(struct seq_file *m, void *v)
 {
 	const struct blk_mq_debugfs_attr *attr = m->private;

@@ -803,9 +752,6 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
 	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
 	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
 	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
-	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
-	{"merged", 0600, ctx_merged_show, ctx_merged_write},
-	{"completed", 0600, ctx_completed_show, ctx_completed_write},
 	{},
 };
 

@@ -387,13 +387,10 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 	 * potentially merge with. Currently includes a hand-wavy stop
 	 * count of 8, to not spend too much time checking for merges.
 	 */
-	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
-		ctx->rq_merged++;
+	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
 		ret = true;
-	}
 
 	spin_unlock(&ctx->lock);
 
 	return ret;
 }

@@ -359,7 +359,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;
 
-	data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
 	blk_crypto_rq_set_defaults(rq);
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* tag was already set */

@@ -595,7 +594,6 @@ static void __blk_mq_free_request(struct request *rq)
 void blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	if (rq->rq_flags & (RQF_ELVPRIV | RQF_ELV)) {
@@ -609,7 +607,6 @@ void blk_mq_free_request(struct request *rq)
 		}
 	}
 
-	ctx->rq_completed[rq_is_sync(rq)]++;
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		__blk_mq_dec_active_requests(hctx);
 

@@ -25,13 +25,6 @@ struct blk_mq_ctx {
 	unsigned short		index_hw[HCTX_MAX_TYPES];
 	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];
 
-	/* incremented at dispatch time */
-	unsigned long		rq_dispatched[2];
-	unsigned long		rq_merged;
-
-	/* incremented at completion time */
-	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
-
 	struct request_queue	*queue;
 	struct blk_mq_ctxs	*ctxs;
 	struct kobject		kobj;