mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-15 00:34:10 +08:00
Revert "cpuset: Make cpuset hotplug synchronous"
This reverts commit a49e4629b5
("cpuset: Make cpuset hotplug synchronous"), as
it may deadlock with the CPU hotplug path.
Link: http://lkml.kernel.org/r/F0388D99-84D7-453B-9B6B-EEFF0E7BE4CC@lca.pw
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Qian Cai <cai@lca.pw>
Cc: Prateek Sood <prsood@codeaurora.org>
This commit is contained in:
parent
38aca3071c
commit
2b729fe7f3
@@ -54,6 +54,7 @@ extern int cpuset_init(void);
|
||||
extern void cpuset_init_smp(void);
|
||||
extern void cpuset_force_rebuild(void);
|
||||
extern void cpuset_update_active_cpus(void);
|
||||
extern void cpuset_wait_for_hotplug(void);
|
||||
extern void cpuset_read_lock(void);
|
||||
extern void cpuset_read_unlock(void);
|
||||
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
|
||||
@@ -175,6 +176,8 @@ static inline void cpuset_update_active_cpus(void)
|
||||
partition_sched_domains(1, NULL, NULL);
|
||||
}
|
||||
|
||||
static inline void cpuset_wait_for_hotplug(void) { }
|
||||
|
||||
static inline void cpuset_read_lock(void) { }
|
||||
static inline void cpuset_read_unlock(void) { }
|
||||
|
||||
|
@@ -3101,7 +3101,7 @@ update_tasks:
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuset_hotplug - handle CPU/memory hotunplug for a cpuset
|
||||
* cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
|
||||
*
|
||||
* This function is called after either CPU or memory configuration has
|
||||
* changed and updates cpuset accordingly. The top_cpuset is always
|
||||
@@ -3116,7 +3116,7 @@ update_tasks:
|
||||
* Note that CPU offlining during suspend is ignored. We don't modify
|
||||
* cpusets across suspend/resume cycles at all.
|
||||
*/
|
||||
static void cpuset_hotplug(bool use_cpu_hp_lock)
|
||||
static void cpuset_hotplug_workfn(struct work_struct *work)
|
||||
{
|
||||
static cpumask_t new_cpus;
|
||||
static nodemask_t new_mems;
|
||||
@@ -3201,32 +3201,25 @@ static void cpuset_hotplug(bool use_cpu_hp_lock)
|
||||
/* rebuild sched domains if cpus_allowed has changed */
|
||||
if (cpus_updated || force_rebuild) {
|
||||
force_rebuild = false;
|
||||
if (use_cpu_hp_lock)
|
||||
rebuild_sched_domains();
|
||||
else {
|
||||
/* Acquiring cpu_hotplug_lock is not required.
|
||||
* When cpuset_hotplug() is called in hotplug path,
|
||||
* cpu_hotplug_lock is held by the hotplug context
|
||||
* which is waiting for cpuhp_thread_fun to indicate
|
||||
* completion of callback.
|
||||
*/
|
||||
percpu_down_write(&cpuset_rwsem);
|
||||
rebuild_sched_domains_locked();
|
||||
percpu_up_write(&cpuset_rwsem);
|
||||
}
|
||||
rebuild_sched_domains();
|
||||
}
|
||||
|
||||
free_cpumasks(NULL, ptmp);
|
||||
}
|
||||
|
||||
static void cpuset_hotplug_workfn(struct work_struct *work)
|
||||
{
|
||||
cpuset_hotplug(true);
|
||||
}
|
||||
|
||||
void cpuset_update_active_cpus(void)
|
||||
{
|
||||
cpuset_hotplug(false);
|
||||
/*
|
||||
* We're inside cpu hotplug critical region which usually nests
|
||||
* inside cgroup synchronization. Bounce actual hotplug processing
|
||||
* to a work item to avoid reverse locking order.
|
||||
*/
|
||||
schedule_work(&cpuset_hotplug_work);
|
||||
}
|
||||
|
||||
void cpuset_wait_for_hotplug(void)
|
||||
{
|
||||
flush_work(&cpuset_hotplug_work);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -204,6 +204,8 @@ void thaw_processes(void)
|
||||
__usermodehelper_set_disable_depth(UMH_FREEZING);
|
||||
thaw_workqueues();
|
||||
|
||||
cpuset_wait_for_hotplug();
|
||||
|
||||
read_lock(&tasklist_lock);
|
||||
for_each_process_thread(g, p) {
|
||||
/* No other threads should have PF_SUSPEND_TASK set */
|
||||
|
Loading…
Reference in New Issue
Block a user