345d52c184

The commit f5bfdc8e39 ("locking/osq: Use optimized spinning loop for arm64")
introduced a warning from Clang because vcpu_is_preempted() is compiled away:

kernel/locking/osq_lock.c:25:19: warning: unused function 'node_cpu' [-Wunused-function]
static inline int node_cpu(struct optimistic_spin_node *node)
                  ^
1 warning generated.

Fix it by converting vcpu_is_preempted() to a static inline function.

Fixes: f5bfdc8e39 ("locking/osq: Use optimized spinning loop for arm64")
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Qian Cai <cai@lca.pw>
Signed-off-by: Will Deacon <will@kernel.org>
28 lines
601 B
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/qrwlock.h>
#include <asm/qspinlock.h>

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()

/*
 * Changing this will break osq_lock() thanks to the call inside
 * smp_cond_load_relaxed().
 *
 * See:
 * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
 */
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}

#endif /* __ASM_SPINLOCK_H */
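For context on the comment in the header: smp_cond_load_relaxed() takes its
condition as an expression and re-evaluates it on each pass of the wait loop,
and the arm64 implementation can park in an event-based wait that only wakes
on a write to the watched variable, so a vcpu_is_preempted() that returned
anything dynamic would not be re-polled. Below is a userspace sketch modelled
on the generic fallback in include/asm-generic/barrier.h (an assumption-laden
simplification: READ_ONCE() here is a local stand-in, cpu_relax() is omitted,
and GNU C statement expressions/typeof are assumed):

/* Userspace sketch of the generic smp_cond_load_relaxed() fallback. */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's READ_ONCE(); forces a fresh load each pass. */
#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))

#define smp_cond_load_relaxed(ptr, cond_expr) ({	\
	typeof(ptr) __PTR = (ptr);			\
	typeof(*__PTR) VAL;				\
	for (;;) {					\
		VAL = READ_ONCE(*__PTR);		\
		if (cond_expr)				\
			break;				\
	}						\
	VAL;						\
})

/* The arm64 definition from the header above: a constant false. */
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}

int main(void)
{
	int locked = 1;	/* pretend the lock was already granted to us */

	/*
	 * The condition is re-evaluated each iteration here, but the real
	 * arm64 implementation waits for a store to *ptr instead of
	 * spinning, which is why the condition must not depend on state
	 * that changes without such a store.
	 */
	int val = smp_cond_load_relaxed(&locked,
					VAL || vcpu_is_preempted(0));
	printf("loaded node->locked = %d\n", val);
	return 0;
}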