blk-throttle: track read and write request individually
In a mixed read/write workload on an SSD, write latency is much lower than read latency. But today we only track and record read latency, and then use it as the threshold base for both read and write I/O latency accounting. As a result, write I/O latency is almost always judged good, and bad_bio_cnt stays well below 20% of bio_cnt. In other words, the tg being checked is treated as idle most of the time and keeps letting other groups dispatch more I/Os, even when it is genuinely running under its low limit and wants that limit guaranteed, which is not what we expect.

So track read and write requests individually, which gives more precise latency control for low-limit idle detection.

Signed-off-by: Joseph Qi <qijiang.qj@alibaba-inc.com>
Reviewed-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit b889bf66d0
parent a13553c777
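Before the diff, the idea can be pictured with a minimal userspace sketch (illustrative names and numbers, not blk-throttle code): keep one latency accumulator per I/O direction so the write baseline is no longer derived from read samples.

/*
 * Minimal userspace sketch of the idea behind this patch: keep latency
 * statistics per direction so that cheap SSD writes are judged against a
 * write baseline instead of the much higher read baseline.
 */
#include <stdio.h>

enum { DIR_READ = 0, DIR_WRITE = 1 };	/* mirrors kernel READ/WRITE */

struct lat_stat {
	unsigned long total_latency;	/* summed completion latency, us */
	unsigned long samples;
};

static struct lat_stat stats[2];	/* one accumulator per direction */

static void track_latency(int dir, unsigned long lat_us)
{
	stats[dir].total_latency += lat_us;
	stats[dir].samples++;
}

static unsigned long avg_latency(int dir)
{
	return stats[dir].samples ?
		stats[dir].total_latency / stats[dir].samples : 0;
}

int main(void)
{
	/* fast writes, slower reads - typical for an SSD */
	track_latency(DIR_WRITE, 30);
	track_latency(DIR_WRITE, 40);
	track_latency(DIR_READ, 150);
	track_latency(DIR_READ, 170);

	printf("read avg %lu us, write avg %lu us\n",
	       avg_latency(DIR_READ), avg_latency(DIR_WRITE));	/* 160 / 35 */
	return 0;
}

With a single read-derived baseline of roughly 160us, the 30-40us writes above would always pass the threshold and never count toward bad_bio_cnt; per-direction averages give writes their own, much lower baseline.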
@@ -216,9 +216,9 @@ struct throtl_data
 
 	unsigned int scale;
 
-	struct latency_bucket tmp_buckets[LATENCY_BUCKET_SIZE];
-	struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
-	struct latency_bucket __percpu *latency_buckets;
+	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
+	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
+	struct latency_bucket __percpu *latency_buckets[2];
 	unsigned long last_calculate_time;
 	unsigned long filtered_latency;
 
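A minimal standalone illustration of what the structure change amounts to (sizes and fields below are stand-ins): each per-bucket array simply gains a leading row per direction, selected by the usual kernel values READ == 0 and WRITE == 1.

#include <stdio.h>

#define LATENCY_BUCKET_SIZE 9	/* stand-in value */

struct latency_bucket {
	unsigned long total_latency;
	int samples;
};

int main(void)
{
	/* was: tmp_buckets[LATENCY_BUCKET_SIZE]; now one row per direction */
	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];

	/* twice the old footprint: a READ row plus a WRITE row */
	printf("%zu -> %zu bytes\n",
	       sizeof(tmp_buckets[0]), sizeof(tmp_buckets));
	return 0;
}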
@@ -2050,10 +2050,10 @@ static void blk_throtl_update_idletime(struct throtl_grp *tg)
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 static void throtl_update_latency_buckets(struct throtl_data *td)
 {
-	struct avg_latency_bucket avg_latency[LATENCY_BUCKET_SIZE];
-	int i, cpu;
-	unsigned long last_latency = 0;
-	unsigned long latency;
+	struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
+	int i, cpu, rw;
+	unsigned long last_latency[2] = { 0 };
+	unsigned long latency[2];
 
 	if (!blk_queue_nonrot(td->queue))
 		return;
@@ -2062,56 +2062,67 @@ static void throtl_update_latency_buckets(struct throtl_data *td)
 	td->last_calculate_time = jiffies;
 
 	memset(avg_latency, 0, sizeof(avg_latency));
-	for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
-		struct latency_bucket *tmp = &td->tmp_buckets[i];
-
-		for_each_possible_cpu(cpu) {
-			struct latency_bucket *bucket;
-
-			/* this isn't race free, but ok in practice */
-			bucket = per_cpu_ptr(td->latency_buckets, cpu);
-			tmp->total_latency += bucket[i].total_latency;
-			tmp->samples += bucket[i].samples;
-			bucket[i].total_latency = 0;
-			bucket[i].samples = 0;
-		}
+	for (rw = READ; rw <= WRITE; rw++) {
+		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
+			struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
+
+			for_each_possible_cpu(cpu) {
+				struct latency_bucket *bucket;
+
+				/* this isn't race free, but ok in practice */
+				bucket = per_cpu_ptr(td->latency_buckets[rw],
+					cpu);
+				tmp->total_latency += bucket[i].total_latency;
+				tmp->samples += bucket[i].samples;
+				bucket[i].total_latency = 0;
+				bucket[i].samples = 0;
+			}
 
-		if (tmp->samples >= 32) {
-			int samples = tmp->samples;
-
-			latency = tmp->total_latency;
-
-			tmp->total_latency = 0;
-			tmp->samples = 0;
-			latency /= samples;
-			if (latency == 0)
-				continue;
-			avg_latency[i].latency = latency;
+			if (tmp->samples >= 32) {
+				int samples = tmp->samples;
+
+				latency[rw] = tmp->total_latency;
+
+				tmp->total_latency = 0;
+				tmp->samples = 0;
+				latency[rw] /= samples;
+				if (latency[rw] == 0)
+					continue;
+				avg_latency[rw][i].latency = latency[rw];
+			}
 		}
 	}
 
-	for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
-		if (!avg_latency[i].latency) {
-			if (td->avg_buckets[i].latency < last_latency)
-				td->avg_buckets[i].latency = last_latency;
-			continue;
-		}
+	for (rw = READ; rw <= WRITE; rw++) {
+		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
+			if (!avg_latency[rw][i].latency) {
+				if (td->avg_buckets[rw][i].latency < last_latency[rw])
+					td->avg_buckets[rw][i].latency =
+						last_latency[rw];
+				continue;
+			}
 
-		if (!td->avg_buckets[i].valid)
-			latency = avg_latency[i].latency;
-		else
-			latency = (td->avg_buckets[i].latency * 7 +
-				avg_latency[i].latency) >> 3;
+			if (!td->avg_buckets[rw][i].valid)
+				latency[rw] = avg_latency[rw][i].latency;
+			else
+				latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
+					avg_latency[rw][i].latency) >> 3;
 
-		td->avg_buckets[i].latency = max(latency, last_latency);
-		td->avg_buckets[i].valid = true;
-		last_latency = td->avg_buckets[i].latency;
+			td->avg_buckets[rw][i].latency = max(latency[rw],
+				last_latency[rw]);
+			td->avg_buckets[rw][i].valid = true;
+			last_latency[rw] = td->avg_buckets[rw][i].latency;
+		}
 	}
 
 	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
 		throtl_log(&td->service_queue,
-			"Latency bucket %d: latency=%ld, valid=%d", i,
-			td->avg_buckets[i].latency, td->avg_buckets[i].valid);
+			"Latency bucket %d: read latency=%ld, read valid=%d, "
+			"write latency=%ld, write valid=%d", i,
+			td->avg_buckets[READ][i].latency,
+			td->avg_buckets[READ][i].valid,
+			td->avg_buckets[WRITE][i].latency,
+			td->avg_buckets[WRITE][i].valid);
 }
 #else
 static inline void throtl_update_latency_buckets(struct throtl_data *td)
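Both passes above now run once per direction, but the smoothing itself is unchanged: each bucket's average weights the old value 7/8 and the new sample 1/8, then is clamped so averages never decrease with bucket index. A standalone sketch of that arithmetic, with made-up values:

#include <stdio.h>

/*
 * Worked sketch of the per-bucket smoothing:
 * new_avg = (old_avg * 7 + sample) / 8, then clamped so averages never
 * decrease from one (larger-I/O) bucket to the next.
 */
static unsigned long smooth(unsigned long old_avg, unsigned long sample,
			    unsigned long last_latency)
{
	unsigned long lat = (old_avg * 7 + sample) >> 3;

	return lat > last_latency ? lat : last_latency;	/* max() */
}

int main(void)
{
	unsigned long read_avg = 160, write_avg = 35;	/* microseconds */

	read_avg = smooth(read_avg, 200, 0);	/* (160*7 + 200)/8 = 165 */
	write_avg = smooth(write_avg, 30, 0);	/* (35*7 + 30)/8 = 34 */
	printf("read %lu us, write %lu us\n", read_avg, write_avg);
	return 0;
}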
@@ -2258,16 +2269,17 @@ static void throtl_track_latency(struct throtl_data *td, sector_t size,
 	struct latency_bucket *latency;
 	int index;
 
-	if (!td || td->limit_index != LIMIT_LOW || op != REQ_OP_READ ||
+	if (!td || td->limit_index != LIMIT_LOW ||
+	    !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
 	    !blk_queue_nonrot(td->queue))
 		return;
 
 	index = request_bucket_index(size);
 
-	latency = get_cpu_ptr(td->latency_buckets);
+	latency = get_cpu_ptr(td->latency_buckets[op]);
 	latency[index].total_latency += time;
 	latency[index].samples++;
-	put_cpu_ptr(td->latency_buckets);
+	put_cpu_ptr(td->latency_buckets[op]);
 }
 
 void blk_throtl_stat_add(struct request *rq, u64 time_ns)
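The tracking path now accepts write completions as well and indexes the per-cpu buckets with the op code itself; this relies on REQ_OP_READ and REQ_OP_WRITE being 0 and 1, the same values as READ and WRITE. A userspace analogue of that indexing (constants and types are stand-ins):

#include <assert.h>

/*
 * Stand-ins for the kernel constants this indexing relies on: in the
 * kernel, REQ_OP_READ == 0 and REQ_OP_WRITE == 1, matching READ/WRITE,
 * which is why the op code can select the per-direction bucket row
 * directly.
 */
enum { REQ_OP_READ = 0, REQ_OP_WRITE = 1 };
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
	unsigned long total_latency;
	int samples;
};

static struct latency_bucket latency_buckets[2][LATENCY_BUCKET_SIZE];

static void track_latency(int op, int index, unsigned long time)
{
	/* mirrors the read/write part of the patched guard */
	if (op != REQ_OP_READ && op != REQ_OP_WRITE)
		return;
	latency_buckets[op][index].total_latency += time;
	latency_buckets[op][index].samples++;
}

int main(void)
{
	track_latency(REQ_OP_WRITE, 2, 40);
	assert(latency_buckets[REQ_OP_WRITE][2].samples == 1);
	assert(latency_buckets[REQ_OP_READ][2].samples == 0);
	return 0;
}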
@@ -2286,6 +2298,7 @@ void blk_throtl_bio_endio(struct bio *bio)
 	unsigned long finish_time;
 	unsigned long start_time;
 	unsigned long lat;
+	int rw = bio_data_dir(bio);
 
 	tg = bio->bi_cg_private;
 	if (!tg)
|
|||||||
|
|
||||||
bucket = request_bucket_index(
|
bucket = request_bucket_index(
|
||||||
blk_stat_size(&bio->bi_issue_stat));
|
blk_stat_size(&bio->bi_issue_stat));
|
||||||
threshold = tg->td->avg_buckets[bucket].latency +
|
threshold = tg->td->avg_buckets[rw][bucket].latency +
|
||||||
tg->latency_target;
|
tg->latency_target;
|
||||||
if (lat > threshold)
|
if (lat > threshold)
|
||||||
tg->bad_bio_cnt++;
|
tg->bad_bio_cnt++;
|
||||||
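For context, a simplified sketch of the accounting this threshold feeds (names and numbers are illustrative): each completed bio is compared against the per-direction baseline plus the group's latency target, and the group only counts as non-idle when a meaningful share of its bios are slow; the kernel's idle test is roughly bad_bio_cnt * 5 < bio_cnt, i.e. under 20% bad.

#include <stdio.h>

struct tg_sample {
	unsigned long bio_cnt;
	unsigned long bad_bio_cnt;
};

/* Count a completion as "bad" when it exceeds the per-direction baseline
 * plus the group's configured latency target. */
static void account(struct tg_sample *tg, unsigned long lat,
		    unsigned long baseline, unsigned long target)
{
	tg->bio_cnt++;
	if (lat > baseline + target)
		tg->bad_bio_cnt++;
}

int main(void)
{
	struct tg_sample tg = { 0, 0 };

	/* writes judged against the (low) write baseline now register as
	 * slow when they genuinely are slow */
	account(&tg, 120, 35, 50);	/* bad: 120 > 35 + 50 */
	account(&tg, 40, 35, 50);	/* good */

	printf("idle: %s\n",
	       tg.bad_bio_cnt * 5 < tg.bio_cnt ? "yes" : "no");	/* no */
	return 0;
}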
@@ -2407,9 +2420,16 @@ int blk_throtl_init(struct request_queue *q)
 	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
 	if (!td)
 		return -ENOMEM;
-	td->latency_buckets = __alloc_percpu(sizeof(struct latency_bucket) *
+	td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
 		LATENCY_BUCKET_SIZE, __alignof__(u64));
-	if (!td->latency_buckets) {
+	if (!td->latency_buckets[READ]) {
+		kfree(td);
+		return -ENOMEM;
+	}
+	td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
+		LATENCY_BUCKET_SIZE, __alignof__(u64));
+	if (!td->latency_buckets[WRITE]) {
+		free_percpu(td->latency_buckets[READ]);
 		kfree(td);
 		return -ENOMEM;
 	}
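The init path now makes two per-cpu allocations, so the failure path for the second must unwind the first. The same pattern in plain C, with calloc() standing in for __alloc_percpu():

#include <stdlib.h>

#define LATENCY_BUCKET_SIZE 9	/* stand-in value */

struct latency_bucket {
	unsigned long total_latency;
	int samples;
};

/* Allocate one bucket array per direction; if the second allocation
 * fails, free the first before reporting the error. */
static int init_latency_buckets(struct latency_bucket *buckets[2])
{
	buckets[0] = calloc(LATENCY_BUCKET_SIZE, sizeof(*buckets[0]));
	if (!buckets[0])
		return -1;
	buckets[1] = calloc(LATENCY_BUCKET_SIZE, sizeof(*buckets[1]));
	if (!buckets[1]) {
		free(buckets[0]);	/* unwind the earlier allocation */
		buckets[0] = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct latency_bucket *buckets[2] = { NULL, NULL };

	if (init_latency_buckets(buckets))
		return 1;
	free(buckets[0]);
	free(buckets[1]);
	return 0;
}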
@@ -2428,7 +2448,8 @@ int blk_throtl_init(struct request_queue *q)
 	/* activate policy */
 	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
 	if (ret) {
-		free_percpu(td->latency_buckets);
+		free_percpu(td->latency_buckets[READ]);
+		free_percpu(td->latency_buckets[WRITE]);
 		kfree(td);
 	}
 	return ret;
@@ -2439,7 +2460,8 @@ void blk_throtl_exit(struct request_queue *q)
 	BUG_ON(!q->td);
 	throtl_shutdown_wq(q);
 	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
-	free_percpu(q->td->latency_buckets);
+	free_percpu(q->td->latency_buckets[READ]);
+	free_percpu(q->td->latency_buckets[WRITE]);
 	kfree(q->td);
 }
 
@@ -2457,8 +2479,10 @@ void blk_throtl_register_queue(struct request_queue *q)
 	} else {
 		td->throtl_slice = DFL_THROTL_SLICE_HD;
 		td->filtered_latency = LATENCY_FILTERED_HD;
-		for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
-			td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
+		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
+			td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
+			td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
+		}
 	}
 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
 	/* if no low limit, use previous default */