[PATCH] Make RCU task_struct safe for oprofile

Applying RCU to the task structure broke oprofile, because
task_free_notify() can now be called from softirq.  This means that the
task_mortuary lock must be acquired with irqs disabled to avoid
intermittent self-deadlock.  Since irqs are now disabled while the lock
is held, the critical section within process_task_mortuary() has been
restructured to be O(1), maximizing scalability and minimizing realtime
latency degradation.
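
To make the deadlock concrete, here is a minimal sketch (the lock name
matches the code below; the two functions are hypothetical stand-ins for
the buffer-sync path, and the snippet is illustrative kernel-style C,
not buildable outside a kernel tree):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(task_mortuary);

/* Process context: the buffer-sync path takes the lock with irqs on. */
static void sync_path_broken(void)
{
        spin_lock(&task_mortuary);
        /*
         * If a softirq fires on this CPU right here, the RCU callback
         * path ends up in task_free_notify(), which also takes
         * task_mortuary: the CPU spins on a lock it already holds.
         */
        spin_unlock(&task_mortuary);
}

/* The fix: keep irqs off on this CPU for as long as the lock is held. */
static void sync_path_fixed(void)
{
        unsigned long flags;

        spin_lock_irqsave(&task_mortuary, flags);
        /* critical section: now safe from local softirqs */
        spin_unlock_irqrestore(&task_mortuary, flags);
}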

Kudos to Wu Fengguang for finding this problem!

Cc: Wu Fengguang <wfg@mail.ustc.edu.cn>
Cc: Philippe Elie <phil.el@wanadoo.fr>
Cc: John Levon <levon@movementarian.org>
Signed-off-by: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

commit 4369ef3c3e (parent eafbaa9469)
Author:    Paul E. McKenney
Committer: Linus Torvalds
Date:      2006-01-08 01:01:35 -08:00

@@ -43,13 +43,16 @@ static void process_task_mortuary(void);
  * list for processing. Only after two full buffer syncs
  * does the task eventually get freed, because by then
  * we are sure we will not reference it again.
+ * Can be invoked from softirq via RCU callback due to
+ * call_rcu() of the task struct, hence the _irqsave.
  */
 static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
 {
+        unsigned long flags;
         struct task_struct * task = data;
-        spin_lock(&task_mortuary);
+        spin_lock_irqsave(&task_mortuary, flags);
         list_add(&task->tasks, &dying_tasks);
-        spin_unlock(&task_mortuary);
+        spin_unlock_irqrestore(&task_mortuary, flags);
         return NOTIFY_OK;
 }
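
Why the notifier now runs in softirq context, roughly: the RCU
conversion frees the task struct from an RCU callback rather than
synchronously, and RCU callbacks execute from softirq.  A hedged sketch
of that shape (the callback name and the rcu field are assumptions
patterned on the surrounding RCU-tasklist series, not verified against
it):

#include <linux/rcupdate.h>
#include <linux/sched.h>

/*
 * Illustrative RCU callback: invoked from softirq after a grace
 * period.  Dropping the last reference leads through free_task()
 * and the task-handoff notifier chain to task_free_notify().
 */
static void task_rcu_free(struct rcu_head *rhp)
{
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

        put_task_struct(tsk);
}

/* Release side queues the callback instead of freeing in place: */
/*        call_rcu(&tsk->rcu, task_rcu_free);                     */
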
@@ -431,25 +434,22 @@ static void increment_tail(struct oprofile_cpu_buffer * b)
  */
 static void process_task_mortuary(void)
 {
-        struct list_head * pos;
-        struct list_head * pos2;
+        unsigned long flags;
+        LIST_HEAD(local_dead_tasks);
         struct task_struct * task;
+        struct task_struct * ttask;
 
-        spin_lock(&task_mortuary);
+        spin_lock_irqsave(&task_mortuary, flags);
 
-        list_for_each_safe(pos, pos2, &dead_tasks) {
-                task = list_entry(pos, struct task_struct, tasks);
+        list_splice_init(&dead_tasks, &local_dead_tasks);
+        list_splice_init(&dying_tasks, &dead_tasks);
+
+        spin_unlock_irqrestore(&task_mortuary, flags);
+
+        list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
                 list_del(&task->tasks);
                 free_task(task);
         }
-
-        list_for_each_safe(pos, pos2, &dying_tasks) {
-                task = list_entry(pos, struct task_struct, tasks);
-                list_del(&task->tasks);
-                list_add_tail(&task->tasks, &dead_tasks);
-        }
-
-        spin_unlock(&task_mortuary);
 }
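
Reassembled from the hunk above, the post-patch function in one piece,
with comments added to spell out the two-phase scheme (the comments are
editorial, not part of the patch):

static void process_task_mortuary(void)
{
        unsigned long flags;
        LIST_HEAD(local_dead_tasks);
        struct task_struct * task;
        struct task_struct * ttask;

        spin_lock_irqsave(&task_mortuary, flags);

        /*
         * O(1) critical section: two pointer splices, no list walk.
         * Tasks that have survived a full buffer sync move from
         * dead_tasks onto a local list for freeing; newly dying tasks
         * advance from dying_tasks to dead_tasks for the next cycle.
         */
        list_splice_init(&dead_tasks, &local_dead_tasks);
        list_splice_init(&dying_tasks, &dead_tasks);

        spin_unlock_irqrestore(&task_mortuary, flags);

        /*
         * The potentially long walk-and-free now runs with irqs
         * enabled and the lock dropped; local_dead_tasks is on the
         * stack, private to this invocation, so no locking is needed.
         */
        list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
                list_del(&task->tasks);
                free_task(task);
        }
}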