x86/qspinlock-paravirt: Fix missing-prototype warning

__pv_queued_spin_unlock_slowpath() is defined in a header file as
a global function, and designed to be called from inline asm, but
there is no prototype visible at the point of definition:

  kernel/locking/qspinlock_paravirt.h:493:1: error: no previous \
    prototype for '__pv_queued_spin_unlock_slowpath' [-Werror=missing-prototypes]

Add the prototype to the x86 header that contains the inline asm
calling it, and ensure that header gets included before the definition,
rather than after it.
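
For context, -Wmissing-prototypes complains whenever a non-static function is
defined without a declaration already in scope. A minimal sketch of the fixed
layout, using hypothetical file and function names rather than the kernel's own:

  /* slow_unlock.h -- hypothetical stand-in for the x86 header */
  void slow_unlock(int *lock);        /* prototype visible to every includer */

  /* slow_unlock.c -- hypothetical stand-in for the file defining the function */
  #include "slow_unlock.h"            /* declaration now precedes the definition */

  void slow_unlock(int *lock)         /* no "no previous prototype" warning */
  {
          *lock = 0;
  }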

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230803082619.1369127-8-arnd@kernel.org
commit 8874a414f8 (parent ce0a1b608b)
Arnd Bergmann, 2023-08-03 10:26:19 +02:00, committed by Borislav Petkov (AMD)
2 files changed, 12 insertions(+), 10 deletions(-)

--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h

@@ -4,6 +4,8 @@
 
 #include <asm/ibt.h>
 
+void __lockfunc __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);
+
 /*
  * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
  * registers. For i386, however, only 1 32-bit register needs to be saved
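
The prototype belongs in this header because the same header builds the
callee-save unlock thunk out of inline asm, so the only reference to
__pv_queued_spin_unlock_slowpath() the compiler ever sees is a bare symbol
name. A much-simplified, hypothetical sketch of that pattern (not the
kernel's actual thunk):

  /* a thunk emitted as file-scope inline asm calls a C function purely by
   * symbol name; only an explicit prototype tells the compiler that the
   * out-of-line definition is intentional and externally referenced */
  void slow_unlock(int *lock);

  asm(".pushsection .text\n"
      ".globl slow_unlock_thunk\n"
      "slow_unlock_thunk:\n"
      "	jmp slow_unlock\n"
      ".popsection\n");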

--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h

@@ -485,6 +485,16 @@ gotlock:
 	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
 }
 
+/*
+ * Include the architecture specific callee-save thunk of the
+ * __pv_queued_spin_unlock(). This thunk is put together with
+ * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
+ * function close to each other sharing consecutive instruction cachelines.
+ * Alternatively, architecture specific version of __pv_queued_spin_unlock()
+ * can be defined.
+ */
+#include <asm/qspinlock_paravirt.h>
+
 /*
  * PV versions of the unlock fastpath and slowpath functions to be used
  * instead of queued_spin_unlock().
@@ -533,16 +543,6 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 	pv_kick(node->cpu);
 }
 
-/*
- * Include the architecture specific callee-save thunk of the
- * __pv_queued_spin_unlock(). This thunk is put together with
- * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
- * function close to each other sharing consecutive instruction cachelines.
- * Alternatively, architecture specific version of __pv_queued_spin_unlock()
- * can be defined.
- */
-#include <asm/qspinlock_paravirt.h>
-
 #ifndef __pv_queued_spin_unlock
 __visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
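
The second half of the fix is the ordering: GCC only treats a prototype as
"previous" if it is seen before the definition, so pulling in
<asm/qspinlock_paravirt.h> after __pv_queued_spin_unlock_slowpath() would
leave the warning in place even with the new declaration. Sketched again with
the hypothetical names from above:

  /* old ordering: a prototype exists, but only shows up after the
   * definition, so "no previous prototype" still fires at the definition */
  void slow_unlock(int *lock)         /* warning reported here */
  {
          *lock = 0;
  }

  #include "slow_unlock.h"            /* declaration arrives too late to help */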