mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-27 14:14:24 +08:00
[PATCH] fix workqueue oops during cpu offline
Use first_cpu(cpu_possible_map) for the single-thread workqueue case. We
used to hardcode 0, but that broke on systems where !cpu_possible(0) when
workqueue_struct->cpu_workqueue_struct was changed from a static array to
alloc_percpu.
Commit bce61dd49d ("Fix hardcoded cpu=0 in workqueue for per_cpu_ptr() calls")
fixed that for Ben's funky sparc64 system, but it regressed my Power5.
Offlining cpu 0 oopses upon the next call to queue_work for a single-thread
workqueue, because now we try to manipulate per_cpu_ptr(wq->cpu_wq, 1),
which is uninitialized.
So we need to establish an unchanging "slot" for single-thread workqueues
which will have a valid percpu allocation. Since alloc_percpu keys off of
cpu_possible_map, which must not change after initialization, make this
slot == first_cpu(cpu_possible_map).
Signed-off-by: Nathan Lynch <ntl@pobox.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
945f390f02
commit
f756d5e256
@@ -29,7 +29,8 @@
|
|||||||
#include <linux/kthread.h>
|
#include <linux/kthread.h>
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The per-CPU workqueue (if single thread, we always use cpu 0's).
|
* The per-CPU workqueue (if single thread, we always use the first
|
||||||
|
* possible cpu).
|
||||||
*
|
*
|
||||||
* The sequence counters are for flush_scheduled_work(). It wants to wait
|
* The sequence counters are for flush_scheduled_work(). It wants to wait
|
||||||
* until all currently-scheduled works are completed, but it doesn't
|
* until all currently-scheduled works are completed, but it doesn't
|
||||||
@@ -69,6 +70,8 @@ struct workqueue_struct {
|
|||||||
static DEFINE_SPINLOCK(workqueue_lock);
|
static DEFINE_SPINLOCK(workqueue_lock);
|
||||||
static LIST_HEAD(workqueues);
|
static LIST_HEAD(workqueues);
|
||||||
|
|
||||||
|
static int singlethread_cpu;
|
||||||
|
|
||||||
/* If it's single threaded, it isn't in the list of workqueues. */
|
/* If it's single threaded, it isn't in the list of workqueues. */
|
||||||
static inline int is_single_threaded(struct workqueue_struct *wq)
|
static inline int is_single_threaded(struct workqueue_struct *wq)
|
||||||
{
|
{
|
||||||
@@ -102,7 +105,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
|
|||||||
|
|
||||||
if (!test_and_set_bit(0, &work->pending)) {
|
if (!test_and_set_bit(0, &work->pending)) {
|
||||||
if (unlikely(is_single_threaded(wq)))
|
if (unlikely(is_single_threaded(wq)))
|
||||||
cpu = any_online_cpu(cpu_online_map);
|
cpu = singlethread_cpu;
|
||||||
BUG_ON(!list_empty(&work->entry));
|
BUG_ON(!list_empty(&work->entry));
|
||||||
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
|
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
|
||||||
ret = 1;
|
ret = 1;
|
||||||
@@ -118,7 +121,7 @@ static void delayed_work_timer_fn(unsigned long __data)
|
|||||||
int cpu = smp_processor_id();
|
int cpu = smp_processor_id();
|
||||||
|
|
||||||
if (unlikely(is_single_threaded(wq)))
|
if (unlikely(is_single_threaded(wq)))
|
||||||
cpu = any_online_cpu(cpu_online_map);
|
cpu = singlethread_cpu;
|
||||||
|
|
||||||
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
|
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
|
||||||
}
|
}
|
||||||
@@ -267,7 +270,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
|
|||||||
|
|
||||||
if (is_single_threaded(wq)) {
|
if (is_single_threaded(wq)) {
|
||||||
/* Always use first cpu's area. */
|
/* Always use first cpu's area. */
|
||||||
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map)));
|
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
|
||||||
} else {
|
} else {
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
@@ -325,7 +328,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
|
|||||||
lock_cpu_hotplug();
|
lock_cpu_hotplug();
|
||||||
if (singlethread) {
|
if (singlethread) {
|
||||||
INIT_LIST_HEAD(&wq->list);
|
INIT_LIST_HEAD(&wq->list);
|
||||||
p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
|
p = create_workqueue_thread(wq, singlethread_cpu);
|
||||||
if (!p)
|
if (!p)
|
||||||
destroy = 1;
|
destroy = 1;
|
||||||
else
|
else
|
||||||
@@ -379,7 +382,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
|
|||||||
/* We don't need the distraction of CPUs appearing and vanishing. */
|
/* We don't need the distraction of CPUs appearing and vanishing. */
|
||||||
lock_cpu_hotplug();
|
lock_cpu_hotplug();
|
||||||
if (is_single_threaded(wq))
|
if (is_single_threaded(wq))
|
||||||
cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
|
cleanup_workqueue_thread(wq, singlethread_cpu);
|
||||||
else {
|
else {
|
||||||
for_each_online_cpu(cpu)
|
for_each_online_cpu(cpu)
|
||||||
cleanup_workqueue_thread(wq, cpu);
|
cleanup_workqueue_thread(wq, cpu);
|
||||||
@@ -567,6 +570,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
|
|||||||
|
|
||||||
void init_workqueues(void)
|
void init_workqueues(void)
|
||||||
{
|
{
|
||||||
|
singlethread_cpu = first_cpu(cpu_possible_map);
|
||||||
hotcpu_notifier(workqueue_cpu_callback, 0);
|
hotcpu_notifier(workqueue_cpu_callback, 0);
|
||||||
keventd_wq = create_workqueue("events");
|
keventd_wq = create_workqueue("events");
|
||||||
BUG_ON(!keventd_wq);
|
BUG_ON(!keventd_wq);
|
||||||
|
Loading…
Reference in New Issue
Block a user