mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-12 05:48:39 +08:00
cfq: async queue allocation per priority
If we have two processes with different ioprio_class values but the same ioprio_data, their async requests will fall into the same queue. That is presumably not the intended behavior: real-time requests and best-effort requests do not belong in the same queue.

This patch fixes the problem by introducing additional *cfqq fields on cfqd, pointing to per-(class, priority) async queues.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
commit c2dea2d1fd
parent 9a79b22741
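
In outline, the patch replaces the single per-priority array of async queues with a two-row (class, priority) table plus one dedicated idle queue. A self-contained userspace sketch of that indexing change (illustrative stand-ins only, not the kernel's types or identifiers; the collision it prints is the one described above):

    #include <stdio.h>

    #define IOPRIO_BE_NR 8                  /* priority levels per class */
    enum { CLS_RT, CLS_BE, CLS_IDLE };      /* stand-ins for IOPRIO_CLASS_* */

    struct queue { int dummy; };

    static struct queue *old_async[IOPRIO_BE_NR];    /* before: prio only */
    static struct queue *new_async[2][IOPRIO_BE_NR]; /* after: RT row, BE row */
    static struct queue *new_async_idle;             /* after: one IDLE queue */

    /* Old lookup: the class is ignored, so RT and BE requests collide. */
    static struct queue **old_slot(int cls, int prio)
    {
        (void)cls;
        return &old_async[prio];
    }

    /* New lookup, mirroring cfq_async_queue_prio() in the patch below. */
    static struct queue **new_slot(int cls, int prio)
    {
        switch (cls) {
        case CLS_RT:
            return &new_async[0][prio];
        case CLS_BE:
            return &new_async[1][prio];
        default:
            return &new_async_idle;
        }
    }

    int main(void)
    {
        printf("old: RT/2 and BE/2 share a slot? %s\n",
               old_slot(CLS_RT, 2) == old_slot(CLS_BE, 2) ? "yes" : "no");
        printf("new: RT/2 and BE/2 share a slot? %s\n",
               new_slot(CLS_RT, 2) == new_slot(CLS_BE, 2) ? "yes" : "no");
        return 0;
    }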
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,7 +92,11 @@ struct cfq_data {
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
 
-	struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+	/*
+	 * async queue for each priority case
+	 */
+	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+	struct cfq_queue *async_idle_cfqq;
 
 	struct timer_list idle_class_timer;
 
@@ -1414,24 +1418,44 @@ out:
 	return cfqq;
 }
 
+static struct cfq_queue **
+cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+{
+	switch(ioprio_class) {
+	case IOPRIO_CLASS_RT:
+		return &cfqd->async_cfqq[0][ioprio];
+	case IOPRIO_CLASS_BE:
+		return &cfqd->async_cfqq[1][ioprio];
+	case IOPRIO_CLASS_IDLE:
+		return &cfqd->async_idle_cfqq;
+	default:
+		BUG();
+	}
+}
+
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
 	      gfp_t gfp_mask)
 {
 	const int ioprio = task_ioprio(tsk);
+	const int ioprio_class = task_ioprio_class(tsk);
+	struct cfq_queue **async_cfqq = NULL;
 	struct cfq_queue *cfqq = NULL;
 
-	if (!is_sync)
-		cfqq = cfqd->async_cfqq[ioprio];
+	if (!is_sync) {
+		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+		cfqq = *async_cfqq;
+	}
 
 	if (!cfqq)
 		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
 
 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
 	 */
-	if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+	if (!is_sync && !(*async_cfqq)) {
 		atomic_inc(&cfqq->ref);
-		cfqd->async_cfqq[ioprio] = cfqq;
+		*async_cfqq = cfqq;
 	}
 
 	atomic_inc(&cfqq->ref);
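
The two atomic_inc() calls in cfq_get_queue() are easy to misread: the first pins the queue on behalf of the cached per-(class, priority) pointer and is dropped only at scheduler exit (see cfq_put_async_queues() below), while the second is the reference handed back to the caller. A minimal userspace sketch of that pattern, assuming simplified hypothetical types (C11 atomics standing in for the kernel's atomic_t):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cfqq_sim { atomic_int ref; };

    static struct cfqq_sim *cached;      /* plays the role of *async_cfqq */

    static struct cfqq_sim *get_async_queue(void)
    {
        struct cfqq_sim *q = cached;

        if (!q) {
            q = malloc(sizeof(*q));
            atomic_init(&q->ref, 0);
            /* pin: this reference belongs to the cached pointer and is
             * only dropped when the scheduler exits */
            atomic_fetch_add(&q->ref, 1);
            cached = q;
        }
        /* a second reference for the caller of the lookup */
        atomic_fetch_add(&q->ref, 1);
        return q;
    }

    int main(void)
    {
        struct cfqq_sim *q = get_async_queue();
        printf("after first lookup:  ref = %d\n", atomic_load(&q->ref)); /* 2 */
        get_async_queue();
        printf("after second lookup: ref = %d\n", atomic_load(&q->ref)); /* 3 */
        free(q); /* the kernel drops references via cfq_put_queue() instead */
        return 0;
    }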
@@ -2042,11 +2066,24 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 	blk_sync_queue(cfqd->queue);
 }
 
+static void cfq_put_async_queues(struct cfq_data *cfqd)
+{
+	int i;
+
+	for (i = 0; i < IOPRIO_BE_NR; i++) {
+		if (cfqd->async_cfqq[0][i])
+			cfq_put_queue(cfqd->async_cfqq[0][i]);
+		if (cfqd->async_cfqq[1][i])
+			cfq_put_queue(cfqd->async_cfqq[1][i]);
+		if (cfqd->async_idle_cfqq)
+			cfq_put_queue(cfqd->async_idle_cfqq);
+	}
+}
+
 static void cfq_exit_queue(elevator_t *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	request_queue_t *q = cfqd->queue;
-	int i;
 
 	cfq_shutdown_timer_wq(cfqd);
 
@@ -2063,12 +2100,7 @@ static void cfq_exit_queue(elevator_t *e)
 		__cfq_exit_single_io_context(cfqd, cic);
 	}
 
-	/*
-	 * Put the async queues
-	 */
-	for (i = 0; i < IOPRIO_BE_NR; i++)
-		if (cfqd->async_cfqq[i])
-			cfq_put_queue(cfqd->async_cfqq[i]);
+	cfq_put_async_queues(cfqd);
 
 	spin_unlock_irq(q->queue_lock);
 
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -53,6 +53,14 @@ static inline int task_ioprio(struct task_struct *task)
 	return IOPRIO_NORM;
 }
 
+static inline int task_ioprio_class(struct task_struct *task)
+{
+	if (ioprio_valid(task->ioprio))
+		return IOPRIO_PRIO_CLASS(task->ioprio);
+
+	return IOPRIO_CLASS_BE;
+}
+
 static inline int task_nice_ioprio(struct task_struct *task)
 {
 	return (task_nice(task) + 20) / 5;
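
task_ioprio_class() decodes the class from the upper bits of the combined ioprio value. A small userspace sketch of that encoding; the IOPRIO_* macros are copied here on the assumption that they match include/linux/ioprio.h of this era:

    #include <stdio.h>

    #define IOPRIO_CLASS_SHIFT 13
    #define IOPRIO_PRIO_MASK   ((1UL << IOPRIO_CLASS_SHIFT) - 1)

    #define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
    #define IOPRIO_PRIO_DATA(mask)  ((mask) & IOPRIO_PRIO_MASK)
    #define IOPRIO_PRIO_VALUE(class, data) \
        (((class) << IOPRIO_CLASS_SHIFT) | (data))

    enum {
        IOPRIO_CLASS_NONE,
        IOPRIO_CLASS_RT,
        IOPRIO_CLASS_BE,
        IOPRIO_CLASS_IDLE,
    };

    int main(void)
    {
        int rt2 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 2);
        int be2 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2);

        /* same priority data, different class: the case this commit keeps
         * in separate async queues */
        printf("rt2: class=%d data=%lu\n",
               IOPRIO_PRIO_CLASS(rt2), IOPRIO_PRIO_DATA(rt2));
        printf("be2: class=%d data=%lu\n",
               IOPRIO_PRIO_CLASS(be2), IOPRIO_PRIO_DATA(be2));
        return 0;
    }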