workqueue: Replace pwq_activate_inactive_work() with [__]pwq_activate_work()
[ Upstream commit 4c6380305d ]

To prepare for unbound nr_active handling improvements, move the work
activation part of pwq_activate_inactive_work() into __pwq_activate_work()
and add pwq_activate_work(), which tests WORK_STRUCT_INACTIVE and updates
nr_active.

pwq_activate_first_inactive() and try_to_grab_pending() are updated to use
pwq_activate_work(). The latter conversion is functionally identical. For
the former, this conversion adds an unnecessary WORK_STRUCT_INACTIVE test.
This is temporary and will be removed by the next patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
Stable-dep-of: 5797b1c189 ("workqueue: Implement system-wide nr_active enforcement for unbound workqueues")
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent bad184d26a
commit 6c592f0bb9
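As a rough illustration of the split described above, the following standalone C sketch models the new division of work: a wrapper that tests the inactive flag and owns the nr_active accounting, and an inner helper that performs the activation unconditionally. All names and types here (pwq_model, work_model, model_activate*, etc.) are simplified stand-ins for illustration only, not the kernel's actual structures or API; the real change is in the diff below.

/*
 * Standalone model of the pwq_activate_work()/__pwq_activate_work() split.
 * Names and types are illustrative stand-ins, not kernel structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct pwq_model {
	int nr_active;		/* counts currently active work items */
};

struct work_model {
	bool inactive;		/* stands in for WORK_STRUCT_INACTIVE */
};

/* Unconditional activation step, analogous to __pwq_activate_work(). */
static void model_activate(struct work_model *work)
{
	work->inactive = false;
}

/*
 * Test-and-activate wrapper, analogous to pwq_activate_work(): it checks
 * the inactive flag and owns the nr_active accounting, so callers no
 * longer test the flag themselves.
 */
static bool model_activate_if_inactive(struct pwq_model *pwq,
					struct work_model *work)
{
	if (!work->inactive)
		return false;		/* already active, nothing to account */

	pwq->nr_active++;
	model_activate(work);
	return true;
}

int main(void)
{
	struct pwq_model pwq = { .nr_active = 0 };
	struct work_model work = { .inactive = true };

	/* First call activates and accounts; second call is a no-op. */
	printf("first:  %d (nr_active=%d)\n",
	       model_activate_if_inactive(&pwq, &work), pwq.nr_active);
	printf("second: %d (nr_active=%d)\n",
	       model_activate_if_inactive(&pwq, &work), pwq.nr_active);
	return 0;
}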
kernel/workqueue.c
@@ -1455,16 +1455,36 @@ static bool pwq_is_empty(struct pool_workqueue *pwq)
 	return !pwq->nr_active && list_empty(&pwq->inactive_works);
 }
 
-static void pwq_activate_inactive_work(struct work_struct *work)
+static void __pwq_activate_work(struct pool_workqueue *pwq,
+				struct work_struct *work)
 {
-	struct pool_workqueue *pwq = get_work_pwq(work);
-
 	trace_workqueue_activate_work(work);
 	if (list_empty(&pwq->pool->worklist))
 		pwq->pool->watchdog_ts = jiffies;
 	move_linked_works(work, &pwq->pool->worklist, NULL);
 	__clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
 }
 
+/**
+ * pwq_activate_work - Activate a work item if inactive
+ * @pwq: pool_workqueue @work belongs to
+ * @work: work item to activate
+ *
+ * Returns %true if activated. %false if already active.
+ */
+static bool pwq_activate_work(struct pool_workqueue *pwq,
+			      struct work_struct *work)
+{
+	struct worker_pool *pool = pwq->pool;
+
+	lockdep_assert_held(&pool->lock);
+
+	if (!(*work_data_bits(work) & WORK_STRUCT_INACTIVE))
+		return false;
+
+	pwq->nr_active++;
+	__pwq_activate_work(pwq, work);
+	return true;
+}
+
 static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
@@ -1472,7 +1492,7 @@ static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
 	struct work_struct *work = list_first_entry(&pwq->inactive_works,
 						    struct work_struct, entry);
 
-	pwq_activate_inactive_work(work);
+	pwq_activate_work(pwq, work);
 }
 
 /**
@@ -1610,8 +1630,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 		 * management later on and cause stall. Make sure the work
 		 * item is activated before grabbing.
 		 */
-		if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
-			pwq_activate_inactive_work(work);
+		pwq_activate_work(pwq, work);
 
 		list_del_init(&work->entry);
 		pwq_dec_nr_in_flight(pwq, *work_data_bits(work));