x86/paravirt: Remove no longer needed 32-bit pvops cruft

PVOP_VCALL4() is only used for Xen PV, while PVOP_CALL4() isn't used
at all. Keep PVOP_CALL4() for 64-bit anyway, for symmetry with
PVOP_VCALL4().

This allows removing the 32-bit definitions of those macros, leading
to a substantial simplification of the paravirt macros, as those were
the only ones needing non-empty "pre" and "post" parameters.
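
For illustration only, a minimal user-space sketch of the shape change
(GNU C statement expressions, like the kernel macros themselves;
OLD_CALL(), NEW_CALL() and add4() are made-up names, not kernel code):
once no caller passes a non-empty "pre"/"post" fragment, the fixed slots
can be dropped and the wrappers simply forward __VA_ARGS__.

  #include <stdio.h>

  /*
   * old shape: every caller has to fill the pre/post slots, almost
   * always with empty no-ops
   */
  #define OLD_CALL(func, pre, post, ...)		\
  ({						\
  	pre;					\
  	long __ret = (func)(__VA_ARGS__);	\
  	post;					\
  	__ret;					\
  })

  /* new shape: the unused slots are gone */
  #define NEW_CALL(func, ...)	((long)(func)(__VA_ARGS__))

  static long add4(long a, long b, long c, long d) { return a + b + c + d; }

  int main(void)
  {
  	printf("%ld\n", OLD_CALL(add4, (void)0, (void)0, 1, 2, 3, 4));
  	printf("%ld\n", NEW_CALL(add4, 1, 2, 3, 4));
  	return 0;
  }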

PVOP_CALLEE2() and PVOP_VCALLEE2() are used nowhere, so remove them.

Special handling of return types larger than unsigned long is no
longer needed either; replace it with a BUILD_BUG_ON().
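
As an aside, a minimal user-space stand-in for that check (the kernel
uses BUILD_BUG_ON() from <linux/build_bug.h>; CHECK_RETTYPE() below is a
made-up helper name for illustration): a pvops call returns its result
in a single register, so any wider return type is now rejected at build
time instead of being special-cased at run time.

  #include <assert.h>

  /* stand-in for the kernel's BUILD_BUG_ON() */
  #define CHECK_RETTYPE(rettype)						\
  	static_assert(sizeof(rettype) <= sizeof(unsigned long),	\
  		      "pvops return value must fit in one register")

  int main(void)
  {
  	CHECK_RETTYPE(unsigned long);	/* OK: fits in a single register */
  	/*
  	 * CHECK_RETTYPE(long long) on 32-bit would fail to compile here,
  	 * instead of needing the old __edx:__eax merge path.
  	 */
  	return 0;
  }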

DISABLE_INTERRUPTS() is used in 32-bit code only, so it can just be
replaced by cli.

INTERRUPT_RETURN in 32-bit code can be replaced by iret.

ENABLE_INTERRUPTS() is used nowhere, so it can be removed.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210311142319.4723-10-jgross@suse.com
Juergen Gross, 2021-03-11 15:23:14 +01:00; committed by Borislav Petkov
commit 33634e42e3 (parent 4e6292114c)
5 changed files with 35 additions and 123 deletions

@@ -430,7 +430,7 @@
 	 * will soon execute iret and the tracer was already set to
 	 * the irqstate after the IRET:
 	 */
-	DISABLE_INTERRUPTS(CLBR_ANY)
+	cli
 	lss	(%esp), %esp		/* switch to espfix segment */
 .Lend_\@:
 #endif /* CONFIG_X86_ESPFIX32 */
@@ -1077,7 +1077,7 @@ restore_all_switch_stack:
 	 * when returning from IPI handler and when returning from
 	 * scheduler to user-space.
	 */
-	INTERRUPT_RETURN
+	iret
 
 .section .fixup, "ax"
 SYM_CODE_START(asm_iret_error)

@@ -109,9 +109,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
 }
 #else
 
-#define ENABLE_INTERRUPTS(x)	sti
-#define DISABLE_INTERRUPTS(x)	cli
-
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_DEBUG_ENTRY
 #define SAVE_FLAGS(x)		pushfq; popq %rax
@@ -119,8 +116,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
 
 #define INTERRUPT_RETURN	jmp native_iret
-#else
-#define INTERRUPT_RETURN	iret
 #endif
 
 #endif /* __ASSEMBLY__ */

@@ -719,6 +719,7 @@ extern void default_banner(void);
 	.if ((~(set)) & mask); pop %reg; .endif
 
 #ifdef CONFIG_X86_64
+#ifdef CONFIG_PARAVIRT_XXL
 
 #define PV_SAVE_REGS(set)			\
 	COND_PUSH(set, CLBR_RAX, rax);		\
@@ -744,46 +745,12 @@ extern void default_banner(void);
 #define PARA_PATCH(off)		((off) / 8)
 #define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
 #define PARA_INDIRECT(addr)	*addr(%rip)
-#else
-#define PV_SAVE_REGS(set)			\
-	COND_PUSH(set, CLBR_EAX, eax);		\
-	COND_PUSH(set, CLBR_EDI, edi);		\
-	COND_PUSH(set, CLBR_ECX, ecx);		\
-	COND_PUSH(set, CLBR_EDX, edx)
-#define PV_RESTORE_REGS(set)			\
-	COND_POP(set, CLBR_EDX, edx);		\
-	COND_POP(set, CLBR_ECX, ecx);		\
-	COND_POP(set, CLBR_EDI, edi);		\
-	COND_POP(set, CLBR_EAX, eax)
-
-#define PARA_PATCH(off)		((off) / 4)
-#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .long, 4)
-#define PARA_INDIRECT(addr)	*%cs:addr
-#endif
 
-#ifdef CONFIG_PARAVIRT_XXL
 #define INTERRUPT_RETURN						\
 	PARA_SITE(PARA_PATCH(PV_CPU_iret),				\
 		  ANNOTATE_RETPOLINE_SAFE;				\
 		  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)
 
-#define DISABLE_INTERRUPTS(clobbers)					\
-	PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),			\
-		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
-		  ANNOTATE_RETPOLINE_SAFE;				\
-		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);	\
-		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
-
-#define ENABLE_INTERRUPTS(clobbers)					\
-	PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),			\
-		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
-		  ANNOTATE_RETPOLINE_SAFE;				\
-		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);	\
-		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
-#endif
-
-#ifdef CONFIG_X86_64
-#ifdef CONFIG_PARAVIRT_XXL
 #ifdef CONFIG_DEBUG_ENTRY
 #define SAVE_FLAGS(clobbers)						\
 	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),				\

@@ -470,55 +470,34 @@ int paravirt_disable_iospace(void);
 	})
 
-#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,	\
-		      pre, post, ...)					\
+#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, ...)	\
 	({								\
-		rettype __ret;						\
 		PVOP_CALL_ARGS;						\
 		PVOP_TEST_NULL(op);					\
-		/* This is 32-bit specific, but is okay in 64-bit */	\
-		/* since this condition will never hold */		\
-		if (sizeof(rettype) > sizeof(unsigned long)) {		\
-			asm volatile(pre				\
-				     paravirt_alt(PARAVIRT_CALL)	\
-				     post				\
-				     : call_clbr, ASM_CALL_CONSTRAINT	\
-				     : paravirt_type(op),		\
-				       paravirt_clobber(clbr),		\
-				       ##__VA_ARGS__			\
-				     : "memory", "cc" extra_clbr);	\
-			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
-		} else {						\
-			asm volatile(pre				\
-				     paravirt_alt(PARAVIRT_CALL)	\
-				     post				\
-				     : call_clbr, ASM_CALL_CONSTRAINT	\
-				     : paravirt_type(op),		\
-				       paravirt_clobber(clbr),		\
-				       ##__VA_ARGS__			\
-				     : "memory", "cc" extra_clbr);	\
-			__ret = (rettype)(__eax & PVOP_RETMASK(rettype)); \
-		}							\
-		__ret;							\
+		BUILD_BUG_ON(sizeof(rettype) > sizeof(unsigned long));	\
+		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
+			     : call_clbr, ASM_CALL_CONSTRAINT		\
+			     : paravirt_type(op),			\
+			       paravirt_clobber(clbr),			\
+			       ##__VA_ARGS__				\
+			     : "memory", "cc" extra_clbr);		\
+		(rettype)(__eax & PVOP_RETMASK(rettype));		\
 	})
 
-#define __PVOP_CALL(rettype, op, pre, post, ...)			\
+#define __PVOP_CALL(rettype, op, ...)					\
 	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
-		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
+		      EXTRA_CLOBBERS, ##__VA_ARGS__)
 
-#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
+#define __PVOP_CALLEESAVE(rettype, op, ...)				\
 	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
-		      PVOP_CALLEE_CLOBBERS, ,				\
-		      pre, post, ##__VA_ARGS__)
+		      PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)
 
-#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
+#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, ...)		\
 	({								\
 		PVOP_VCALL_ARGS;					\
 		PVOP_TEST_NULL(op);					\
-		asm volatile(pre					\
-			     paravirt_alt(PARAVIRT_CALL)		\
-			     post					\
+		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
 			     : call_clbr, ASM_CALL_CONSTRAINT		\
 			     : paravirt_type(op),			\
 			       paravirt_clobber(clbr),			\
@@ -526,84 +505,57 @@ int paravirt_disable_iospace(void);
 			     : "memory", "cc" extra_clbr);		\
 	})
 
-#define __PVOP_VCALL(op, pre, post, ...)				\
+#define __PVOP_VCALL(op, ...)						\
 	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
-		       VEXTRA_CLOBBERS,					\
-		       pre, post, ##__VA_ARGS__)
+		       VEXTRA_CLOBBERS, ##__VA_ARGS__)
 
-#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
+#define __PVOP_VCALLEESAVE(op, ...)					\
 	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
-		       PVOP_VCALLEE_CLOBBERS, ,				\
-		       pre, post, ##__VA_ARGS__)
+		       PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)
 
 #define PVOP_CALL0(rettype, op)						\
-	__PVOP_CALL(rettype, op, "", "")
+	__PVOP_CALL(rettype, op)
 #define PVOP_VCALL0(op)							\
-	__PVOP_VCALL(op, "", "")
+	__PVOP_VCALL(op)
 
 #define PVOP_CALLEE0(rettype, op)					\
-	__PVOP_CALLEESAVE(rettype, op, "", "")
+	__PVOP_CALLEESAVE(rettype, op)
 #define PVOP_VCALLEE0(op)						\
-	__PVOP_VCALLEESAVE(op, "", "")
+	__PVOP_VCALLEESAVE(op)
 
 #define PVOP_CALL1(rettype, op, arg1)					\
-	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1))
 #define PVOP_VCALL1(op, arg1)						\
-	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
+	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1))
 
 #define PVOP_CALLEE1(rettype, op, arg1)					\
-	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+	__PVOP_CALLEESAVE(rettype, op, PVOP_CALL_ARG1(arg1))
 #define PVOP_VCALLEE1(op, arg1)						\
-	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
+	__PVOP_VCALLEESAVE(op, PVOP_CALL_ARG1(arg1))
 
 #define PVOP_CALL2(rettype, op, arg1, arg2)				\
-	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
-		    PVOP_CALL_ARG2(arg2))
+	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
 #define PVOP_VCALL2(op, arg1, arg2)					\
-	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
-		     PVOP_CALL_ARG2(arg2))
-#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
-	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
-			  PVOP_CALL_ARG2(arg2))
-#define PVOP_VCALLEE2(op, arg1, arg2)					\
-	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
-			   PVOP_CALL_ARG2(arg2))
+	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
 
 #define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
-	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
+	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1),			\
 		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
 #define PVOP_VCALL3(op, arg1, arg2, arg3)				\
-	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
+	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1),				\
 		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
 
-/* This is the only difference in x86_64. We can make it much simpler */
-#ifdef CONFIG_X86_32
-#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
-	__PVOP_CALL(rettype, op,					\
-		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
-		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
-		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
-#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
-	__PVOP_VCALL(op,						\
-		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
-		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
-		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
-#else
 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
-	__PVOP_CALL(rettype, op, "", "",				\
+	__PVOP_CALL(rettype, op,					\
 		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
 		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
-	__PVOP_VCALL(op, "", "",					\
-		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
+	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
 		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
-#endif
 
 /* Lazy mode for batching updates / context switch */
 enum paravirt_lazy_mode {

@@ -63,8 +63,6 @@ static void __used common(void)
 #ifdef CONFIG_PARAVIRT_XXL
 	BLANK();
-	OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable);
-	OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable);
 	OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret);
 #endif