
blkcg: restructure blkg_policy_data allocation in blkcg_activate_policy()

When a policy gets activated, it needs to allocate and install its
policy data on all existing blkg's (blkcg_gq's).  Because blkg
iteration is protected by a spinlock, it currently counts the total
number of blkg's in the system, allocates the matching number of
policy data on a list and installs them during a single iteration.

This can be simplified by using speculative GFP_NOWAIT allocations
while iterating and falling back to a preallocated policy data on
failure.  If the preallocated one has already been consumed, it
releases the lock, preallocates with GFP_KERNEL and then restarts the
iteration.  This can be a bit more expensive than before but policy
activation is a very cold path and shouldn't matter.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
Tejun Heo 2015-08-18 14:55:09 -07:00 committed by Jens Axboe
parent bc915e61cd
commit 4c55f4f9ad
2 changed files with 21 additions and 37 deletions
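Before the diff itself, here is a minimal userspace sketch of the allocation pattern the message describes: try a cheap non-blocking allocation while holding the lock, fall back to a single preallocated object, and if that has already been consumed, drop the lock, preallocate with a blocking allocation and restart the scan. The names (item, try_alloc_nowait, activate_policy) and the pthread mutex standing in for the queue spinlock are illustrative assumptions, not code from this commit.

#include <pthread.h>
#include <stdlib.h>

struct item {
        struct item *next;
        void *pd;                       /* per-item data, NULL until installed */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for kzalloc(..., GFP_NOWAIT): non-blocking, may fail */
static void *try_alloc_nowait(size_t size)
{
        return rand() % 4 ? calloc(1, size) : NULL;
}

static int activate_policy(struct item *head, size_t pd_size)
{
        void *pd_prealloc = NULL;
        struct item *it;

prealloc:
        if (!pd_prealloc) {
                /* "GFP_KERNEL" analogue: may block, so done without the lock */
                pd_prealloc = calloc(1, pd_size);
                if (!pd_prealloc)
                        return -1;
        }

        pthread_mutex_lock(&list_lock);
        for (it = head; it; it = it->next) {
                void *pd;

                if (it->pd)                     /* installed on an earlier pass */
                        continue;

                pd = try_alloc_nowait(pd_size); /* speculative attempt */
                if (!pd) {                      /* consume the preallocated one */
                        pd = pd_prealloc;
                        pd_prealloc = NULL;
                }
                if (!pd) {                      /* prealloc gone too: refill and restart */
                        pthread_mutex_unlock(&list_lock);
                        goto prealloc;
                }
                it->pd = pd;
        }
        pthread_mutex_unlock(&list_lock);

        free(pd_prealloc);                      /* unused leftover, if any */
        return 0;
}

Restarting is safe because items that already received their data are skipped on the next pass, which is exactly the role the blkg->pd[pol->plid] check plays in the kernel code below.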


@@ -1047,65 +1047,52 @@ EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
 int blkcg_activate_policy(struct request_queue *q,
                           const struct blkcg_policy *pol)
 {
-        LIST_HEAD(pds);
+        struct blkg_policy_data *pd_prealloc = NULL;
         struct blkcg_gq *blkg;
-        struct blkg_policy_data *pd, *nd;
-        int cnt = 0, ret;
+        int ret;

         if (blkcg_policy_enabled(q, pol))
                 return 0;

-        /* count and allocate policy_data for all existing blkgs */
         blk_queue_bypass_start(q);
-        spin_lock_irq(q->queue_lock);
-        list_for_each_entry(blkg, &q->blkg_list, q_node)
-                cnt++;
-        spin_unlock_irq(q->queue_lock);
-
-        /* allocate per-blkg policy data for all existing blkgs */
-        while (cnt--) {
-                pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
-                if (!pd) {
+pd_prealloc:
+        if (!pd_prealloc) {
+                pd_prealloc = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
+                if (!pd_prealloc) {
                         ret = -ENOMEM;
-                        goto out_free;
+                        goto out_bypass_end;
                 }
-                list_add_tail(&pd->alloc_node, &pds);
         }

-        /*
-         * Install the allocated pds and cpds. With @q bypassing, no new blkg
-         * should have been created while the queue lock was dropped.
-         */
         spin_lock_irq(q->queue_lock);

         list_for_each_entry(blkg, &q->blkg_list, q_node) {
-                if (WARN_ON(list_empty(&pds))) {
-                        /* umm... this shouldn't happen, just abort */
-                        ret = -ENOMEM;
-                        goto out_unlock;
-                }
-                pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
-                list_del_init(&pd->alloc_node);
+                struct blkg_policy_data *pd;

-                /* grab blkcg lock too while installing @pd on @blkg */
-                spin_lock(&blkg->blkcg->lock);
+                if (blkg->pd[pol->plid])
+                        continue;
+
+                pd = kzalloc_node(pol->pd_size, GFP_NOWAIT, q->node);
+                if (!pd)
+                        swap(pd, pd_prealloc);
+                if (!pd) {
+                        spin_unlock_irq(q->queue_lock);
+                        goto pd_prealloc;
+                }

                 blkg->pd[pol->plid] = pd;
                 pd->blkg = blkg;
                 pd->plid = pol->plid;
                 pol->pd_init_fn(blkg);
-
-                spin_unlock(&blkg->blkcg->lock);
         }

         __set_bit(pol->plid, q->blkcg_pols);
         ret = 0;
-out_unlock:
+
         spin_unlock_irq(q->queue_lock);
-out_free:
+out_bypass_end:
         blk_queue_bypass_end(q);
-        list_for_each_entry_safe(pd, nd, &pds, alloc_node)
-                kfree(pd);
+        kfree(pd_prealloc);
         return ret;
 }
 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
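For context on how this path is reached: a policy calls blkcg_activate_policy() on a request_queue when it attaches to it, and the function installs per-blkg policy data for every blkg already hanging off that queue. A call site looks roughly like the sketch below; blkcg_policy_example and my_policy_init_queue are placeholder names, not identifiers from this tree.

/* hypothetical policy init path; only blkcg_activate_policy() is from the tree */
static struct blkcg_policy blkcg_policy_example;

int my_policy_init_queue(struct request_queue *q)
{
        /* allocates and installs blkg_policy_data on all existing blkgs */
        return blkcg_activate_policy(q, &blkcg_policy_example);
}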


@@ -80,9 +80,6 @@ struct blkg_policy_data {
         /* the blkg and policy id this per-policy data belongs to */
         struct blkcg_gq *blkg;
         int plid;
-
-        /* used during policy activation */
-        struct list_head alloc_node;
 };

 /*