x86: unify smp_call_function_mask
The definition is moved to the common header; the x86_64 function is now named native_smp_call_function_mask.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
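The rename and the shared wrapper follow the smp_ops indirection already used on 32-bit x86: generic code calls a static inline in the common header, which dispatches through a table of function pointers that each backend fills in. Below is a minimal, self-contained userspace sketch of that pattern; every name in it (smp_ops_sketch, native_call_function_mask, and so on) is illustrative, not the kernel's actual definition.

#include <stdio.h>

struct smp_ops_sketch {
	int (*call_function_mask)(unsigned long mask,
				  void (*func)(void *), void *info, int wait);
};

/* "native" backend, playing the role of native_smp_call_function_mask() */
static int native_call_function_mask(unsigned long mask,
				     void (*func)(void *), void *info, int wait)
{
	(void)mask;
	(void)wait;
	func(info);	/* a real kernel would IPI each CPU set in mask */
	return 0;
}

static struct smp_ops_sketch smp_ops = {
	.call_function_mask = native_call_function_mask,
};

/* common wrapper, playing the role of the inline moved into the shared header */
static int call_function_mask(unsigned long mask,
			      void (*func)(void *), void *info, int wait)
{
	return smp_ops.call_function_mask(mask, func, info, wait);
}

static void say_hello(void *info)
{
	printf("cross-call ran: %s\n", (const char *)info);
}

int main(void)
{
	return call_function_mask(0x1, say_hello, "hello", 1);
}

Running it prints one line from say_hello(); in the kernel, the backend instead sends an IPI to the CPUs in the mask and, when wait is non-zero, waits until the handlers have completed.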
commit 64b1a21e09
parent 8678969e60
@@ -386,7 +386,7 @@ static int __smp_call_function_mask(cpumask_t mask,
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function_mask(cpumask_t mask,
+int native_smp_call_function_mask(cpumask_t mask,
 			   void (*func)(void *), void *info,
 			   int wait)
 {
@@ -531,5 +531,6 @@ asmlinkage void smp_call_function_interrupt(void)
 
 struct smp_ops smp_ops = {
 	.smp_send_reschedule = native_smp_send_reschedule,
+	.smp_call_function_mask = native_smp_call_function_mask,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
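Registering the native implementation in smp_ops, as the hunk above does, is what makes the entry replaceable: an alternative backend (the usual motivation for such ops tables is paravirtualization) only has to repoint the slot. Continuing the userspace sketch from above, with a purely hypothetical pv_call_function_mask:

/* hypothetical paravirt backend, reusing the types from the sketch above */
static int pv_call_function_mask(unsigned long mask,
				 void (*func)(void *), void *info, int wait)
{
	(void)mask;
	(void)wait;
	/* e.g. hand the cross-call to a hypervisor instead of sending IPIs */
	func(info);
	return 0;
}

void use_pv_backend(void)
{
	smp_ops.call_function_mask = pv_call_function_mask;
}

Callers never change: they still go through the inline wrapper, which reads whatever pointer is installed at call time.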
@@ -28,6 +28,13 @@ static inline void smp_send_reschedule(int cpu)
 {
 	smp_ops.smp_send_reschedule(cpu);
 }
+
+static inline int smp_call_function_mask(cpumask_t mask,
+					 void (*func) (void *info), void *info,
+					 int wait)
+{
+	return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
 #endif
 
 #ifdef CONFIG_X86_32
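With the wrapper moved into the common header, callers on 32-bit and 64-bit x86 use one and the same entry point. A hedged usage sketch against the kernel APIs of that era follows: the module scaffolding, bump_counter, and the message text are hypothetical, while smp_call_function_mask and cpu_online_map are the real interfaces.

#include <linux/module.h>
#include <linux/smp.h>
#include <asm/atomic.h>

static atomic_t hits = ATOMIC_INIT(0);

static void bump_counter(void *info)
{
	atomic_inc((atomic_t *)info);	/* runs on each CPU that receives the call */
}

static int __init demo_init(void)
{
	/*
	 * Ask the CPUs in cpu_online_map to run bump_counter() and wait for
	 * the handlers to finish (wait = 1).  Per the comment in the first
	 * hunk, this must not be called with interrupts disabled or from
	 * interrupt context.
	 */
	smp_call_function_mask(cpu_online_map, bump_counter, &hits, 1);
	printk(KERN_INFO "cross-call reached %d cpus\n", atomic_read(&hits));
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");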
@@ -60,12 +60,6 @@ static inline void smp_send_stop(void)
 {
 	smp_ops.smp_send_stop();
 }
-static inline int smp_call_function_mask(cpumask_t mask,
-					 void (*func) (void *info), void *info,
-					 int wait)
-{
-	return smp_ops.smp_call_function_mask(mask, func, info, wait);
-}
 
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);