mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-24 04:34:08 +08:00
generic-ipi: fix the smp_mb() placement
smp_mb() is needed (to make the memory operations visible globally) before sending the IPI on the sender, and the receiver (on Alpha at least) needs smp_read_barrier_depends() in the handler before reading the call_single_queue list in a lock-free fashion. On x86, in x2apic mode, register accesses for sending IPIs do not have serializing semantics, so the need for smp_mb() before sending the IPI becomes more critical in x2apic mode. Remove the unnecessary smp_mb() in csd_flag_wait(): the presence of that smp_mb() provides no guarantee on the sender when the IPI receiver does nothing special (such as a memory fence) after clearing CSD_FLAG_WAIT. Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
This commit is contained in:
parent
e78042e5b8
commit
561920a0d2
18
kernel/smp.c
18
kernel/smp.c
@ -51,10 +51,6 @@ static void csd_flag_wait(struct call_single_data *data)
|
||||
{
|
||||
/* Wait for response */
|
||||
do {
|
||||
/*
|
||||
* We need to see the flags store in the IPI handler
|
||||
*/
|
||||
smp_mb();
|
||||
if (!(data->flags & CSD_FLAG_WAIT))
|
||||
break;
|
||||
cpu_relax();
|
||||
@ -76,6 +72,11 @@ static void generic_exec_single(int cpu, struct call_single_data *data)
|
||||
list_add_tail(&data->list, &dst->list);
|
||||
spin_unlock_irqrestore(&dst->lock, flags);
|
||||
|
||||
/*
|
||||
* Make the list addition visible before sending the ipi.
|
||||
*/
|
||||
smp_mb();
|
||||
|
||||
if (ipi)
|
||||
arch_send_call_function_single_ipi(cpu);
|
||||
|
||||
@ -157,7 +158,7 @@ void generic_smp_call_function_single_interrupt(void)
|
||||
* Need to see other stores to list head for checking whether
|
||||
* list is empty without holding q->lock
|
||||
*/
|
||||
smp_mb();
|
||||
smp_read_barrier_depends();
|
||||
while (!list_empty(&q->list)) {
|
||||
unsigned int data_flags;
|
||||
|
||||
@ -191,7 +192,7 @@ void generic_smp_call_function_single_interrupt(void)
|
||||
/*
|
||||
* See comment on outer loop
|
||||
*/
|
||||
smp_mb();
|
||||
smp_read_barrier_depends();
|
||||
}
|
||||
}
|
||||
|
||||
@ -370,6 +371,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
|
||||
list_add_tail_rcu(&data->csd.list, &call_function_queue);
|
||||
spin_unlock_irqrestore(&call_function_lock, flags);
|
||||
|
||||
/*
|
||||
* Make the list addition visible before sending the ipi.
|
||||
*/
|
||||
smp_mb();
|
||||
|
||||
/* Send a message to all CPUs in the map */
|
||||
arch_send_call_function_ipi(mask);
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user