From 59dc5bfca0cb6a29db1a50847684eb5c19f8f400 Mon Sep 17 00:00:00 2001
From: Nicholas Piggin <npiggin@gmail.com>
Date: Fri, 18 Jun 2021 01:51:03 +1000
Subject: [PATCH] powerpc/64s: avoid reloading (H)SRR registers if they are
 still valid

When an interrupt is taken, the SRR registers are set to return to where
it left off. Unless they are modified in the meantime, or the return
address or MSR are modified, there is no need to reload these registers
when returning from interrupt.

Introduce per-CPU flags that track the validity of SRR and HSRR
registers. These are cleared when returning from interrupt, when using
the registers for something else (e.g., OPAL calls), when adjusting the
return address or MSR of a context, and when context switching (which
changes the return address and MSR).

This improves the performance of interrupt returns.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Fold in fixup patch from Nick]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210617155116.2167984-5-npiggin@gmail.com
---
 arch/powerpc/Kconfig.debug                 |  4 +
 arch/powerpc/include/asm/hw_irq.h          | 10 +-
 arch/powerpc/include/asm/interrupt.h       | 14 ++-
 arch/powerpc/include/asm/livepatch.h       |  2 +-
 arch/powerpc/include/asm/paca.h            |  4 +
 arch/powerpc/include/asm/probes.h          |  4 +-
 arch/powerpc/include/asm/ptrace.h          | 52 +++++++++--
 arch/powerpc/kernel/asm-offsets.c          |  4 +
 arch/powerpc/kernel/entry_64.S             | 92 +++++++++++++++++--
 arch/powerpc/kernel/exceptions-64s.S       | 27 ++++++
 arch/powerpc/kernel/fpu.S                  |  4 +
 arch/powerpc/kernel/hw_breakpoint.c        |  4 +-
 arch/powerpc/kernel/kgdb.c                 | 10 +-
 arch/powerpc/kernel/kprobes-ftrace.c       |  4 +-
 arch/powerpc/kernel/kprobes.c              | 23 +++--
 arch/powerpc/kernel/mce.c                  |  2 +-
 arch/powerpc/kernel/optprobes.c            |  2 +-
 arch/powerpc/kernel/process.c              | 42 +++++----
 arch/powerpc/kernel/prom_init.c            |  3 +
 arch/powerpc/kernel/ptrace/ptrace-adv.c    | 20 ++--
 arch/powerpc/kernel/ptrace/ptrace-noadv.c  | 14 +--
 arch/powerpc/kernel/ptrace/ptrace-view.c   |  5 +-
 arch/powerpc/kernel/rtas.c                 | 14 ++-
 arch/powerpc/kernel/signal.c               | 12 +--
 arch/powerpc/kernel/signal_32.c            | 40 ++++----
 arch/powerpc/kernel/signal_64.c            | 30 +++---
 arch/powerpc/kernel/syscalls.c             |  3 +-
 arch/powerpc/kernel/traps.c                | 42 ++++-----
 arch/powerpc/kernel/uprobes.c              |  4 +-
 arch/powerpc/kernel/vector.S               |  6 ++
 arch/powerpc/kvm/book3s_hv.c               |  3 +
 arch/powerpc/kvm/book3s_pr.c               |  2 +
 arch/powerpc/lib/error-inject.c            |  2 +-
 arch/powerpc/lib/sstep.c                   | 17 ++--
 arch/powerpc/lib/test_emulate_step.c       |  1 +
 arch/powerpc/math-emu/math.c               |  2 +-
 arch/powerpc/math-emu/math_efp.c           |  2 +-
 arch/powerpc/platforms/embedded6xx/holly.c |  4 +-
 .../platforms/embedded6xx/mpc7448_hpc2.c   |  4 +-
 arch/powerpc/platforms/pasemi/idle.c       |  4 +-
 arch/powerpc/platforms/powernv/opal-call.c |  4 +
 arch/powerpc/platforms/powernv/opal.c      |  2 +-
 arch/powerpc/platforms/pseries/hvCall.S    | 29 ++++++
 arch/powerpc/platforms/pseries/ras.c       |  4 +-
 arch/powerpc/sysdev/fsl_pci.c              |  2 +-
 arch/powerpc/sysdev/fsl_rio.c              |  4 +-
 arch/powerpc/xmon/xmon.c                   | 14 +--
 47 files changed, 418 insertions(+), 179 deletions(-)

diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 6342f9da4545..3a8ce31a2ae6 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -85,6 +85,10 @@ config MSI_BITMAP_SELFTEST
 config PPC_IRQ_SOFT_MASK_DEBUG
 	bool "Include extra checks for powerpc irq soft masking"
 
+config PPC_RFI_SRR_DEBUG
+	bool "Include extra checks for RFI SRR register validity"
+	depends on PPC_BOOK3S_64
+
 config XMON
 	bool "Include xmon kernel debugger"
 	depends on DEBUG_KERNEL
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 56a98936a6a9..19bcef666cf6 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -389,7 +389,15 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 	return !(regs->msr & MSR_EE);
 }
 
-static inline void may_hard_irq_enable(void) { }
+static inline bool may_hard_irq_enable(void)
+{
+	return false;
+}
+
+static inline void do_hard_irq_enable(void)
+{
+	BUILD_BUG();
+}
 
 static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
 {
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 59f704408d65..de36fb5d9c51 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -73,13 +73,25 @@
 #include
 #include
 
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void srr_regs_clobbered(void)
+{
+	local_paca->srr_valid = 0;
+	local_paca->hsrr_valid = 0;
+}
+#else
+static inline void srr_regs_clobbered(void)
+{
+}
+#endif
+
 static inline void nap_adjust_return(struct pt_regs *regs)
 {
 #ifdef CONFIG_PPC_970_NAP
 	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
 		/* Can avoid a test-and-clear because NMIs do not call this */
 		clear_thread_local_flags(_TLF_NAPPING);
-		regs->nip = (unsigned long)power4_idle_nap_return;
+		regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
 	}
 #endif
 }
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index ae25e6e72997..4fe018cc207b 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -16,7 +16,7 @@ static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
 {
 	struct pt_regs *regs = ftrace_get_regs(fregs);
 
-	regs->nip = ip;
+	regs_set_return_ip(regs, ip);
 }
 
 #define klp_get_ftrace_location klp_get_ftrace_location
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index ecc8d792a431..f5f0f3408047 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -167,6 +167,10 @@ struct paca_struct {
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
 #ifdef CONFIG_PPC_BOOK3E
 	u16 trap_save;			/* Used when bad stack is encountered */
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	u8 hsrr_valid;			/* HSRRs set for HRFID */
+	u8 srr_valid;			/* SRRs set for RFID */
 #endif
 	u8 irq_soft_mask;		/* mask for irq soft masking */
 	u8 irq_happened;		/* irq happened while soft-disabled */
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h
index 84dd1addd434..c5d984700d24 100644
--- a/arch/powerpc/include/asm/probes.h
+++ b/arch/powerpc/include/asm/probes.h
@@ -34,14 +34,14 @@ typedef u32 ppc_opcode_t;
 /* Enable single stepping for the current task */
 static inline void enable_single_step(struct pt_regs *regs)
 {
-	regs->msr |= MSR_SINGLESTEP;
+	regs_set_return_msr(regs, regs->msr | MSR_SINGLESTEP);
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	/*
 	 * We turn off Critical Input Exception(CE) to ensure that the single
 	 * step will be for the instruction we have the probe on; if we don't,
 	 * it is possible we'd get the single step reported for CE.
 	 */
-	regs->msr &= ~MSR_CE;
+	regs_set_return_msr(regs, regs->msr & ~MSR_CE);
 	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
 #ifdef CONFIG_PPC_47x
 	isync();
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index e06d61b668d4..5d69dafd80ad 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -123,6 +123,47 @@ struct pt_regs
 #endif /* __powerpc64__ */
 
 #ifndef __ASSEMBLY__
+#include <asm/paca.h>
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+long do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
+static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
+{
+	regs->nip = ip;
+#ifdef CONFIG_PPC_BOOK3S_64
+	local_paca->hsrr_valid = 0;
+	local_paca->srr_valid = 0;
+#endif
+}
+
+static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
+{
+	regs->msr = msr;
+#ifdef CONFIG_PPC_BOOK3S_64
+	local_paca->hsrr_valid = 0;
+	local_paca->srr_valid = 0;
+#endif
+}
+
+static inline void set_return_regs_changed(void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+	local_paca->hsrr_valid = 0;
+	local_paca->srr_valid = 0;
+#endif
+}
+
+static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
+{
+	regs_set_return_ip(regs, regs->nip + offset);
+}
 
 static inline unsigned long instruction_pointer(struct pt_regs *regs)
 {
@@ -132,7 +173,7 @@ static inline unsigned long instruction_pointer(struct pt_regs *regs)
 static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long val)
 {
-	regs->nip = val;
+	regs_set_return_ip(regs, val);
 }
 
 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
 {
@@ -145,15 +186,6 @@ static inline unsigned long frame_pointer(struct pt_regs *regs)
 	return 0;
 }
 
-#ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-
-long do_syscall_trace_enter(struct pt_regs *regs);
-void do_syscall_trace_leave(struct pt_regs *regs);
-
 #ifdef __powerpc64__
 #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
 #else
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 31881ef3641d..69a2885adfe9 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -190,6 +190,10 @@ int main(void)
 	OFFSET(PACATOC, paca_struct, kernel_toc);
 	OFFSET(PACAKBASE, paca_struct, kernelbase);
 	OFFSET(PACAKMSR, paca_struct, kernel_msr);
+#ifdef CONFIG_PPC_BOOK3S_64
+	OFFSET(PACAHSRR_VALID, paca_struct, hsrr_valid);
+	OFFSET(PACASRR_VALID, paca_struct, srr_valid);
+#endif
 	OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
 	OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
 	OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 03a45a88b4b8..9a1d5e5599d3 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -64,6 +64,30 @@ exception_marker:
 	.section	".text"
 	.align 7
 
+.macro DEBUG_SRR_VALID srr
+#ifdef CONFIG_PPC_RFI_SRR_DEBUG
+	.ifc \srr,srr
+	mfspr	r11,SPRN_SRR0
+	ld	r12,_NIP(r1)
+100:	tdne	r11,r12
+	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+	mfspr	r11,SPRN_SRR1
+	ld	r12,_MSR(r1)
+100:	tdne	r11,r12
+	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+	.else
+	mfspr	r11,SPRN_HSRR0
+	ld	r12,_NIP(r1)
+100:	tdne	r11,r12
+	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+	mfspr	r11,SPRN_HSRR1
+	ld	r12,_MSR(r1)
+100:	tdne	r11,r12
+	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+	.endif
+#endif
+.endm
+
 #ifdef CONFIG_PPC_BOOK3S
 .macro system_call_vectored name trapnr
 	.globl system_call_vectored_\name
@@ -286,6 +310,11 @@ END_BTB_FLUSH_SECTION
 	ld	r11,exception_marker@toc(r2)
 	std	r11,-16(r10)		/* "regshere" marker */
 
+#ifdef CONFIG_PPC_BOOK3S
+	li	r11,1
+	stb	r11,PACASRR_VALID(r13)
+#endif
+
 	/*
 	 * We always enter kernel from userspace with irq soft-mask enabled and
 	 * nothing pending. system_call_exception() will call
@@ -306,18 +335,27 @@ END_BTB_FLUSH_SECTION
 	bl	syscall_exit_prepare
 
 	ld	r2,_CCR(r1)
+	ld	r6,_LINK(r1)
+	mtlr	r6
+
+#ifdef CONFIG_PPC_BOOK3S
+	lbz	r4,PACASRR_VALID(r13)
+	cmpdi	r4,0
+	bne	1f
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+#endif
 	ld	r4,_NIP(r1)
 	ld	r5,_MSR(r1)
-	ld	r6,_LINK(r1)
+	mtspr	SPRN_SRR0,r4
+	mtspr	SPRN_SRR1,r5
+1:
+	DEBUG_SRR_VALID srr
 
 BEGIN_FTR_SECTION
 	stdcx.	r0,0,r1			/* to clear the reservation */
 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 
-	mtspr	SPRN_SRR0,r4
-	mtspr	SPRN_SRR1,r5
-	mtlr	r6
-
 	cmpdi	r3,0
 	bne	.Lsyscall_restore_regs
 	/* Zero volatile regs that may contain sensitive kernel data */
@@ -673,19 +711,40 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
 	kuap_user_restore r3, r4
 #endif
 .Lfast_user_interrupt_return_\srr\():
-	ld	r11,_NIP(r1)
-	ld	r12,_MSR(r1)
+
 BEGIN_FTR_SECTION
 	ld	r10,_PPR(r1)
 	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+#ifdef CONFIG_PPC_BOOK3S
+	.ifc \srr,srr
+	lbz	r4,PACASRR_VALID(r13)
+	.else
+	lbz	r4,PACAHSRR_VALID(r13)
+	.endif
+	cmpdi	r4,0
+	li	r4,0
+	bne	1f
+#endif
+	ld	r11,_NIP(r1)
+	ld	r12,_MSR(r1)
 	.ifc \srr,srr
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+	stb	r4,PACASRR_VALID(r13)
+#endif
 	.else
 	mtspr	SPRN_HSRR0,r11
 	mtspr	SPRN_HSRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+	stb	r4,PACAHSRR_VALID(r13)
+#endif
 	.endif
+	DEBUG_SRR_VALID \srr
 
 BEGIN_FTR_SECTION
 	stdcx.	r0,0,r1		/* to clear the reservation */
@@ -730,15 +789,34 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 
 .Lfast_kernel_interrupt_return_\srr\():
 	cmpdi	cr1,r3,0
+#ifdef CONFIG_PPC_BOOK3S
+	.ifc \srr,srr
+	lbz	r4,PACASRR_VALID(r13)
+	.else
+	lbz	r4,PACAHSRR_VALID(r13)
+	.endif
+	cmpdi	r4,0
+	li	r4,0
+	bne	1f
+#endif
 	ld	r11,_NIP(r1)
 	ld	r12,_MSR(r1)
 	.ifc \srr,srr
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+	stb	r4,PACASRR_VALID(r13)
+#endif
 	.else
 	mtspr	SPRN_HSRR0,r11
 	mtspr	SPRN_HSRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+	stb	r4,PACAHSRR_VALID(r13)
+#endif
 	.endif
+	DEBUG_SRR_VALID \srr
 
BEGIN_FTR_SECTION
 	stdcx.	r0,0,r1		/* to clear the reservation */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 1bc27af1b425..3d238a3b2a24 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -485,6 +485,20 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
 	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
 	std	r10,GPR1(r1)		/* save r1 in stackframe	*/
 
+	/* Mark our [H]SRRs valid for return */
+	li	r10,1
+	.if IHSRR_IF_HVMODE
+	BEGIN_FTR_SECTION
+	stb	r10,PACAHSRR_VALID(r13)
+	FTR_SECTION_ELSE
+	stb	r10,PACASRR_VALID(r13)
+	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+	.elseif IHSRR
+	stb	r10,PACAHSRR_VALID(r13)
+	.else
+	stb	r10,PACASRR_VALID(r13)
+	.endif
+
 	.if ISET_RI
 	li	r10,MSR_RI
 	mtmsrd	r10,1			/* Set MSR_RI */
@@ -584,10 +598,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 .macro EXCEPTION_RESTORE_REGS hsrr=0
 	/* Move original SRR0 and SRR1 into the respective regs */
 	ld	r9,_MSR(r1)
+	li	r10,0
 	.if \hsrr
 	mtspr	SPRN_HSRR1,r9
+	stb	r10,PACAHSRR_VALID(r13)
 	.else
 	mtspr	SPRN_SRR1,r9
+	stb	r10,PACASRR_VALID(r13)
 	.endif
 	ld	r9,_NIP(r1)
 	.if \hsrr
@@ -1718,6 +1735,8 @@ EXC_COMMON_BEGIN(hdecrementer_common)
 	 *
 	 * Be careful to avoid touching the kernel stack.
 	 */
+	li	r10,0
+	stb	r10,PACAHSRR_VALID(r13)
 	ld	r10,PACA_EXGEN+EX_CTR(r13)
 	mtctr	r10
 	mtcrf	0x80,r9
@@ -2513,6 +2532,8 @@ BEGIN_FTR_SECTION
 	ld	r10,PACA_EXGEN+EX_CFAR(r13)
 	mtspr	SPRN_CFAR,r10
 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+	li	r10,0
+	stb	r10,PACAHSRR_VALID(r13)
 	ld	r10,PACA_EXGEN+EX_R10(r13)
 	ld	r11,PACA_EXGEN+EX_R11(r13)
 	ld	r12,PACA_EXGEN+EX_R12(r13)
@@ -2673,6 +2694,12 @@ masked_interrupt:
 	ori	r11,r11,PACA_IRQ_HARD_DIS
 	stb	r11,PACAIRQHAPPENED(r13)
 2:	/* done */
+	li	r10,0
+	.if \hsrr
+	stb	r10,PACAHSRR_VALID(r13)
+	.else
+	stb	r10,PACASRR_VALID(r13)
+	.endif
 	ld	r10,PACA_EXGEN+EX_CTR(r13)
 	mtctr	r10
 	mtcrf	0x80,r9
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 2c57ece6671c..6010adcee16e 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -103,6 +103,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	ori	r12,r12,MSR_FP
 	or	r12,r12,r4
 	std	r12,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+#endif
 #endif
 	li	r4,1
 	stb	r4,THREAD_LOAD_FP(r5)
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 8fc7a14e4d71..21a638aff72f 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -486,7 +486,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
 		return;
 
 reset:
-	regs->msr &= ~MSR_SE;
+	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
 		__set_breakpoint(i, info);
@@ -537,7 +537,7 @@ static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
 			current->thread.last_hit_ubp[i] = bp[i];
 		info[i] = NULL;
 	}
-	regs->msr |= MSR_SE;
+	regs_set_return_msr(regs, regs->msr | MSR_SE);
 	return false;
 }
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index c48565762698..bdee7262c080 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -147,7 +147,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
 		return 0;
 
 	if (*(u32 *)regs->nip == BREAK_INSTR)
-		regs->nip += BREAK_INSTR_SIZE;
+		regs_add_return_ip(regs, BREAK_INSTR_SIZE);
 
 	return 1;
 }
@@ -372,7 +372,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
 
 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
 {
-	regs->nip = pc;
+	regs_set_return_ip(regs, pc);
 }
 
 /*
@@ -394,7 +394,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
 	case 'c':
 		/* handle the optional parameter */
 		if (kgdb_hex2long(&ptr, &addr))
-			linux_regs->nip = addr;
+			regs_set_return_ip(linux_regs, addr);
 
 		atomic_set(&kgdb_cpu_doing_single_step, -1);
 		/* set the trace bit if we're stepping */
@@ -402,9 +402,9 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 			mtspr(SPRN_DBCR0,
 			      mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
-			linux_regs->msr |= MSR_DE;
+			regs_set_return_msr(linux_regs, linux_regs->msr | MSR_DE);
 #else
-			linux_regs->msr |= MSR_SE;
+			regs_set_return_msr(linux_regs, linux_regs->msr | MSR_SE);
 #endif
 			atomic_set(&kgdb_cpu_doing_single_step,
 				   raw_smp_processor_id());
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 660138f6c4b2..7154d58338cc 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -39,7 +39,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 		 * On powerpc, NIP is *before* this instruction for the
 		 * pre handler
 		 */
-		regs->nip -= MCOUNT_INSN_SIZE;
+		regs_add_return_ip(regs, -MCOUNT_INSN_SIZE);
 
 		__this_cpu_write(current_kprobe, p);
 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -48,7 +48,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 			 * Emulate singlestep (and also recover regs->nip)
 			 * as if there is a nop
 			 */
-			regs->nip += MCOUNT_INSN_SIZE;
+			regs_add_return_ip(regs, MCOUNT_INSN_SIZE);
 			if (unlikely(p->post_handler)) {
 				kcb->kprobe_status = KPROBE_HIT_SSDONE;
 				p->post_handler(p, regs, 0);
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 8ac248cb518a..04714895d3f9 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -194,7 +194,7 @@ static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs
 	 * variant as values in regs could play a part in
 	 * if the trap is taken or not
 	 */
-	regs->nip = (unsigned long)p->ainsn.insn;
+	regs_set_return_ip(regs, (unsigned long)p->ainsn.insn);
 }
 
 static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -335,8 +335,9 @@ int kprobe_handler(struct pt_regs *regs)
 			kprobe_opcode_t insn = *p->ainsn.insn;
 			if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
 				/* Turn off 'trace' bits */
-				regs->msr &= ~MSR_SINGLESTEP;
-				regs->msr |= kcb->kprobe_saved_msr;
+				regs_set_return_msr(regs,
+					(regs->msr & ~MSR_SINGLESTEP) |
+					kcb->kprobe_saved_msr);
 				goto no_kprobe;
 			}
 
@@ -431,7 +432,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * we end up emulating it in kprobe_handler(), which increments the nip
 	 * again.
 	 */
-	regs->nip = orig_ret_address - 4;
+	regs_set_return_ip(regs, orig_ret_address - 4);
 	regs->link = orig_ret_address;
 
 	return 0;
@@ -466,8 +467,8 @@ int kprobe_post_handler(struct pt_regs *regs)
 	}
 
 	/* Adjust nip to after the single-stepped instruction */
-	regs->nip = (unsigned long)cur->addr + len;
-	regs->msr |= kcb->kprobe_saved_msr;
+	regs_set_return_ip(regs, (unsigned long)cur->addr + len);
+	regs_set_return_msr(regs, regs->msr | kcb->kprobe_saved_msr);
 
 	/*Restore back the original saved kprobes variables and continue. */
 	if (kcb->kprobe_status == KPROBE_REENTER) {
@@ -506,9 +507,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		 * and allow the page fault handler to continue as a
 		 * normal page fault.
 		 */
-		regs->nip = (unsigned long)cur->addr;
-		regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
-		regs->msr |= kcb->kprobe_saved_msr;
+		regs_set_return_ip(regs, (unsigned long)cur->addr);
+		/* Turn off 'trace' bits */
+		regs_set_return_msr(regs,
+			(regs->msr & ~MSR_SINGLESTEP) |
+			kcb->kprobe_saved_msr);
 		if (kcb->kprobe_status == KPROBE_REENTER)
 			restore_previous_kprobe(kcb);
 		else
@@ -539,7 +542,7 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		 * zero, try to fix up.
 		 */
 		if ((entry = search_exception_tables(regs->nip)) != NULL) {
-			regs->nip = extable_fixup(entry);
+			regs_set_return_ip(regs, extable_fixup(entry));
 			return 1;
 		}
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 9a3c2a84a2ac..f4b922a1e6ad 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -273,7 +273,7 @@ void mce_common_process_ue(struct pt_regs *regs,
 	entry = search_kernel_exception_table(regs->nip);
 	if (entry) {
 		mce_err->ignore_event = true;
-		regs->nip = extable_fixup(entry);
+		regs_set_return_ip(regs, extable_fixup(entry));
 	}
 }
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 2b8fe40069ad..8b9f82dc6ece 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -106,7 +106,7 @@ static void optimized_callback(struct optimized_kprobe *op,
 		kprobes_inc_nmissed_count(&op->kp);
 	} else {
 		__this_cpu_write(current_kprobe, &op->kp);
-		regs->nip = (unsigned long)op->kp.addr;
+		regs_set_return_ip(regs, (unsigned long)op->kp.addr);
 		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 		opt_pre_handler(&op->kp, regs);
 		__this_cpu_write(current_kprobe, NULL);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4e593fc2c66d..8ab1bfdfd31a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -96,7 +96,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
 	if (tsk == current && tsk->thread.regs &&
 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
 	    !test_thread_flag(TIF_RESTORE_TM)) {
-		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
+		regs_set_return_msr(&tsk->thread.ckpt_regs,
+						tsk->thread.regs->msr);
 		set_thread_flag(TIF_RESTORE_TM);
 	}
 }
@@ -161,7 +162,7 @@ static void __giveup_fpu(struct task_struct *tsk)
 	msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
 	if (cpu_has_feature(CPU_FTR_VSX))
 		msr &= ~MSR_VSX;
-	tsk->thread.regs->msr = msr;
+	regs_set_return_msr(tsk->thread.regs, msr);
 }
 
 void giveup_fpu(struct task_struct *tsk)
@@ -244,7 +245,7 @@ static void __giveup_altivec(struct task_struct *tsk)
 	msr &= ~MSR_VEC;
 	if (cpu_has_feature(CPU_FTR_VSX))
 		msr &= ~MSR_VSX;
-	tsk->thread.regs->msr = msr;
+	regs_set_return_msr(tsk->thread.regs, msr);
 }
 
 void giveup_altivec(struct task_struct *tsk)
@@ -559,7 +560,7 @@ void notrace restore_math(struct pt_regs *regs)
 
 		msr_check_and_clear(new_msr);
 
-		regs->msr |= new_msr | fpexc_mode;
+		regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
 	}
 }
 #endif /* CONFIG_PPC_BOOK3S_64 */
@@ -1114,7 +1115,7 @@ void restore_tm_state(struct pt_regs *regs)
 #endif
 
 	restore_math(regs);
-	regs->msr |= msr_diff;
+	regs_set_return_msr(regs, regs->msr | msr_diff);
 }
 
 #else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -1257,14 +1258,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	}
 
 	/*
-	 * Call restore_sprs() before calling _switch(). If we move it after
-	 * _switch() then we miss out on calling it for new tasks. The reason
-	 * for this is we manually create a stack frame for new tasks that
-	 * directly returns through ret_from_fork() or
+	 * Call restore_sprs() and set_return_regs_changed() before calling
+	 * _switch(). If we move it after _switch() then we miss out on calling
+	 * it for new tasks. The reason for this is we manually create a stack
+	 * frame for new tasks that directly returns through ret_from_fork() or
 	 * ret_from_kernel_thread(). See copy_thread() for details.
 	 */
 	restore_sprs(old_thread, new_thread);
 
+	set_return_regs_changed(); /* _switch changes stack (and regs) */
+
 #ifdef CONFIG_PPC32
 	kuap_assert_locked();
 #endif
@@ -1850,13 +1853,14 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 		}
 		regs->gpr[2] = toc;
 	}
-	regs->nip = entry;
-	regs->msr = MSR_USER64;
+	regs_set_return_ip(regs, entry);
+	regs_set_return_msr(regs, MSR_USER64);
 	} else {
-		regs->nip = start;
 		regs->gpr[2] = 0;
-		regs->msr = MSR_USER32;
+		regs_set_return_ip(regs, start);
+		regs_set_return_msr(regs, MSR_USER32);
 	}
+
 #endif
 #ifdef CONFIG_VSX
 	current->thread.used_vsr = 0;
@@ -1887,7 +1891,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.tm_tfiar = 0;
 	current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-
 }
 EXPORT_SYMBOL(start_thread);
 
@@ -1935,9 +1938,10 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
 	if (val > PR_FP_EXC_PRECISE)
 		return -EINVAL;
 	tsk->thread.fpexc_mode = __pack_fe01(val);
-	if (regs != NULL && (regs->msr & MSR_FP) != 0)
-		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
-			| tsk->thread.fpexc_mode;
+	if (regs != NULL && (regs->msr & MSR_FP) != 0) {
+		regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
+						| tsk->thread.fpexc_mode);
+	}
 	return 0;
 }
 
@@ -1983,9 +1987,9 @@ int set_endian(struct task_struct *tsk, unsigned int val)
 		return -EINVAL;
 
 	if (val == PR_ENDIAN_BIG)
-		regs->msr &= ~MSR_LE;
+		regs_set_return_msr(regs, regs->msr & ~MSR_LE);
 	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
-		regs->msr |= MSR_LE;
+		regs_set_return_msr(regs, regs->msr | MSR_LE);
 	else
 		return -EINVAL;
 
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index cb5be081e499..05ce15b854e2 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include <asm/interrupt.h>
 #include
 #include
 #include
@@ -1792,6 +1793,8 @@ static int prom_rtas_hcall(uint64_t args)
 	asm volatile("sc 1\n" : "=r" (arg1) :
 		     "r" (arg1), "r" (arg2) :);
 
+	srr_regs_clobbered();
+
 	return arg1;
 }
diff --git a/arch/powerpc/kernel/ptrace/ptrace-adv.c b/arch/powerpc/kernel/ptrace/ptrace-adv.c
index 3990c01ef8cf..399f5d94a3df 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-adv.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-adv.c
@@ -12,7 +12,7 @@ void user_enable_single_step(struct task_struct *task)
 	if (regs != NULL) {
 		task->thread.debug.dbcr0 &= ~DBCR0_BT;
 		task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
-		regs->msr |= MSR_DE;
+		regs_set_return_msr(regs, regs->msr | MSR_DE);
 	}
 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
@@ -24,7 +24,7 @@ void user_enable_block_step(struct task_struct *task)
 	if (regs != NULL) {
 		task->thread.debug.dbcr0 &= ~DBCR0_IC;
 		task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
-		regs->msr |= MSR_DE;
+		regs_set_return_msr(regs, regs->msr | MSR_DE);
 	}
 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
@@ -50,7 +50,7 @@ void user_disable_single_step(struct task_struct *task)
 			 * All debug events were off.....
 			 */
 			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
-			regs->msr &= ~MSR_DE;
+			regs_set_return_msr(regs, regs->msr & ~MSR_DE);
 		}
 	}
 	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
@@ -82,6 +82,7 @@ int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
 int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
 			unsigned long data)
 {
+	struct pt_regs *regs = task->thread.regs;
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	int ret;
 	struct thread_struct *thread = &task->thread;
@@ -112,7 +113,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
 			dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
 			if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
 						task->thread.debug.dbcr1)) {
-				task->thread.regs->msr &= ~MSR_DE;
+				regs_set_return_msr(regs, regs->msr & ~MSR_DE);
 				task->thread.debug.dbcr0 &= ~DBCR0_IDM;
 			}
 			return 0;
@@ -132,7 +133,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
 		dbcr_dac(task) |= DBCR_DAC1R;
 	if (data & 0x2UL)
 		dbcr_dac(task) |= DBCR_DAC1W;
-	task->thread.regs->msr |= MSR_DE;
+	regs_set_return_msr(regs, regs->msr | MSR_DE);
 	return 0;
 }
 
@@ -220,7 +221,7 @@ static long set_instruction_bp(struct task_struct *child,
 	}
 out:
 	child->thread.debug.dbcr0 |= DBCR0_IDM;
-	child->thread.regs->msr |= MSR_DE;
+	regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
 
 	return slot;
 }
@@ -336,7 +337,7 @@ static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
 		return -ENOSPC;
 	}
 	child->thread.debug.dbcr0 |= DBCR0_IDM;
-	child->thread.regs->msr |= MSR_DE;
+	regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
 
 	return slot + 4;
 }
@@ -430,7 +431,7 @@ static int set_dac_range(struct task_struct *child,
 		child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
 	else	/* PPC_BREAKPOINT_MODE_MASK */
 		child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
-	child->thread.regs->msr |= MSR_DE;
+	regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
 
 	return 5;
 }
@@ -485,7 +486,8 @@ long ppc_del_hwdebug(struct task_struct *child, long data)
 		if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
 					child->thread.debug.dbcr1)) {
 			child->thread.debug.dbcr0 &= ~DBCR0_IDM;
-			child->thread.regs->msr &= ~MSR_DE;
+			regs_set_return_msr(child->thread.regs,
+					child->thread.regs->msr & ~MSR_DE);
 		}
 	}
 	return rc;
diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
index aa36fcad36cd..a5dd7d2e2c9e 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
@@ -11,10 +11,8 @@ void user_enable_single_step(struct task_struct *task)
 {
 	struct pt_regs *regs = task->thread.regs;
 
-	if (regs != NULL) {
-		regs->msr &= ~MSR_BE;
-		regs->msr |= MSR_SE;
-	}
+	if (regs != NULL)
+		regs_set_return_msr(regs, (regs->msr & ~MSR_BE) | MSR_SE);
 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
 
@@ -22,10 +20,8 @@ void user_enable_block_step(struct task_struct *task)
 {
 	struct pt_regs *regs = task->thread.regs;
 
-	if (regs != NULL) {
-		regs->msr &= ~MSR_SE;
-		regs->msr |= MSR_BE;
-	}
+	if (regs != NULL)
+		regs_set_return_msr(regs, (regs->msr & ~MSR_SE) | MSR_BE);
 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
 
@@ -34,7 +30,7 @@ void user_disable_single_step(struct task_struct *task)
 	struct pt_regs *regs = task->thread.regs;
 
 	if (regs != NULL)
-		regs->msr &= ~(MSR_SE | MSR_BE);
+		regs_set_return_msr(regs, regs->msr & ~(MSR_SE | MSR_BE));
 
 	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c
index 773bcc4ca843..b8be1d6668b5 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-view.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-view.c
@@ -113,8 +113,9 @@ static unsigned long get_user_msr(struct task_struct *task)
 
 static __always_inline int set_user_msr(struct task_struct *task, unsigned long msr)
 {
-	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
-	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
+	unsigned long newmsr = (task->thread.regs->msr & ~MSR_DEBUGCHANGE) |
+			(msr & MSR_DEBUGCHANGE);
+	regs_set_return_msr(task->thread.regs, newmsr);
 	return 0;
 }
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 6bada744402b..99f2cce635fb 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -25,6 +25,7 @@
 #include
 #include
 
+#include <asm/interrupt.h>
 #include
 #include
 #include
@@ -46,6 +47,13 @@
 /* This is here deliberately so it's only used in this file */
 void enter_rtas(unsigned long);
 
+static inline void do_enter_rtas(unsigned long args)
+{
+	enter_rtas(args);
+
+	srr_regs_clobbered(); /* rtas uses SRRs, invalidate */
+}
+
 struct rtas_t rtas = {
 	.lock = __ARCH_SPIN_LOCK_UNLOCKED
 };
@@ -384,7 +392,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
 	save_args = rtas.args;
 	rtas.args = err_args;
 
-	enter_rtas(__pa(&rtas.args));
+	do_enter_rtas(__pa(&rtas.args));
 
 	err_args = rtas.args;
 	rtas.args = save_args;
@@ -430,7 +438,7 @@ va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
 	for (i = 0; i < nret; ++i)
 		args->rets[i] = 0;
 
-	enter_rtas(__pa(args));
+	do_enter_rtas(__pa(args));
 }
 
 void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
@@ -1138,7 +1146,7 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
 	flags = lock_rtas();
 
 	rtas.args = args;
-	enter_rtas(__pa(&rtas.args));
+	do_enter_rtas(__pa(&rtas.args));
 	args = rtas.args;
 
 	/* A -1 return code indicates that the last command couldn't
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 9ded046edb0e..e600764a926c 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -214,7 +214,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
 			regs->gpr[0] = __NR_restart_syscall;
 		else
 			regs->gpr[3] = regs->orig_gpr3;
-		regs->nip -= 4;
+		regs_add_return_ip(regs, -4);
 		regs->result = 0;
 	} else {
 		if (trap_is_scv(regs)) {
@@ -322,16 +322,16 @@ static unsigned long get_tm_stackpointer(struct task_struct *tsk)
 	 * For signals taken in non-TM or suspended mode, we use the
 	 * normal/non-checkpointed stack pointer.
 	 */
-
-	unsigned long ret = tsk->thread.regs->gpr[1];
+	struct pt_regs *regs = tsk->thread.regs;
+	unsigned long ret = regs->gpr[1];
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	BUG_ON(tsk != current);
 
-	if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
+	if (MSR_TM_ACTIVE(regs->msr)) {
 		preempt_disable();
 		tm_reclaim_current(TM_CAUSE_SIGNAL);
-		if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
+		if (MSR_TM_TRANSACTIONAL(regs->msr))
 			ret = tsk->thread.ckpt_regs.gpr[1];
 
 		/*
@@ -341,7 +341,7 @@ static unsigned long get_tm_stackpointer(struct task_struct *tsk)
 		 * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
 		 * enter the signal handler in non-transactional state.
 		 */
-		tsk->thread.regs->msr &= ~MSR_TS_MASK;
+		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
 		preempt_enable();
 	}
 #endif
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index f55ac65c7492..0608581967f0 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -479,14 +479,14 @@ static long restore_user_regs(struct pt_regs *regs,
 
 	/* if doing signal return, restore the previous little-endian mode */
 	if (sig)
-		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+		regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
 
 #ifdef CONFIG_ALTIVEC
 	/*
 	 * Force the process to reload the altivec registers from
 	 * current->thread when it next does altivec instructions
 	 */
-	regs->msr &= ~MSR_VEC;
+	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
 	if (msr & MSR_VEC) {
 		/* restore altivec registers from the stack */
 		unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
@@ -508,7 +508,7 @@ static long restore_user_regs(struct pt_regs *regs,
 	 * Force the process to reload the VSX registers from
 	 * current->thread when it next does VSX instruction.
 	 */
-	regs->msr &= ~MSR_VSX;
+	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
 	if (msr & MSR_VSX) {
 		/*
 		 * Restore altivec registers from the stack to a local
@@ -524,12 +524,12 @@ static long restore_user_regs(struct pt_regs *regs,
 	 * force the process to reload the FP registers from
 	 * current->thread when it next does FP instructions
 	 */
-	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
 
 #ifdef CONFIG_SPE
 	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
-	regs->msr &= ~MSR_SPE;
+	regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
 	if (msr & MSR_SPE) {
 		/* restore spe registers from the stack */
 		unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
@@ -580,9 +580,9 @@ static long restore_tm_user_regs(struct pt_regs *regs,
 	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
 
 	/* Restore the previous little-endian mode */
-	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
 
-	regs->msr &= ~MSR_VEC;
+	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
 	if (msr & MSR_VEC) {
 		/* restore altivec registers from the stack */
 		unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
@@ -601,11 +601,11 @@ static long restore_tm_user_regs(struct pt_regs *regs,
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
 		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
 
-	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
 
 	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
 
-	regs->msr &= ~MSR_VSX;
+	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
 	if (msr & MSR_VSX) {
 		/*
 		 * Restore altivec registers from the stack to a local
@@ -672,7 +672,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
 	 *
 	 * Pull in the MSR TM bits from the user context
 	 */
-	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
+	regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
 	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
 	 * registers, including FP and V[S]Rs.  After recheckpointing, the
 	 * transactional versions should be loaded.
@@ -687,11 +687,11 @@ static long restore_tm_user_regs(struct pt_regs *regs,
 	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
 	if (msr & MSR_FP) {
 		load_fp_state(&current->thread.fp_state);
-		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
+		regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
 	}
 	if (msr & MSR_VEC) {
 		load_vr_state(&current->thread.vr_state);
-		regs->msr |= MSR_VEC;
+		regs_set_return_msr(regs, regs->msr | MSR_VEC);
 	}
 	preempt_enable();
 
@@ -801,10 +801,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
 	regs->gpr[4] = (unsigned long)&frame->info;
 	regs->gpr[5] = (unsigned long)&frame->uc;
 	regs->gpr[6] = (unsigned long)frame;
-	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
+	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
 	/* enter the signal handler in native-endian mode */
-	regs->msr &= ~MSR_LE;
-	regs->msr |= (MSR_KERNEL & MSR_LE);
+	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
+
 	return 0;
 
 failed:
@@ -889,10 +889,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
 	regs->gpr[1] = newsp;
 	regs->gpr[3] = ksig->sig;
 	regs->gpr[4] = (unsigned long) sc;
-	regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
+	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
 	/* enter the signal handler in native-endian mode */
-	regs->msr &= ~MSR_LE;
-	regs->msr |= (MSR_KERNEL & MSR_LE);
+	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
+
 	return 0;
 
 failed:
@@ -1142,7 +1142,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
 		 * set, and recheckpoint was not called. This avoid
 		 * hitting a TM Bad thing at RFID
 		 */
-		regs->msr &= ~MSR_TS_MASK;
+		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
 	}
 	/* Fall through, for non-TM restore */
 #endif
@@ -1231,7 +1231,7 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
 	   affect the contents of these registers.  After this point,
 	   failure is a problem, anyway, and it's very unlikely unless
 	   the user is really doing something wrong. */
-	regs->msr = new_msr;
+	regs_set_return_msr(regs, new_msr);
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	current->thread.debug.dbcr0 = new_dbcr0;
 #endif
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 16b41470ec92..1831bba0582e 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -354,7 +354,7 @@ static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_
 	/* get MSR separately, transfer the LE bit if doing signal return */
 	unsafe_get_user(msr, &sc->gp_regs[PT_MSR], efault_out);
 	if (sig)
-		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+		regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
 	unsafe_get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3], efault_out);
 	unsafe_get_user(regs->ctr, &sc->gp_regs[PT_CTR], efault_out);
 	unsafe_get_user(regs->link, &sc->gp_regs[PT_LNK], efault_out);
@@ -376,7 +376,7 @@ static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_
 	 * This has to be done before copying stuff into tsk->thread.fpr/vr
 	 * for the reasons explained in the previous comment.
 	 */
-	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
 
 #ifdef CONFIG_ALTIVEC
 	unsafe_get_user(v_regs, &sc->v_regs, efault_out);
@@ -468,7 +468,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
 		return -EINVAL;
 
 	/* pull in MSR LE from user context */
-	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
 
 	/* The following non-GPR non-FPR non-VR state is also checkpointed: */
 	err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
@@ -495,7 +495,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
 	 * This has to be done before copying stuff into tsk->thread.fpr/vr
 	 * for the reasons explained in the previous comment.
 	 */
-	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
 
 #ifdef CONFIG_ALTIVEC
 	err |= __get_user(v_regs, &sc->v_regs);
@@ -565,7 +565,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
 	preempt_disable();
 
 	/* pull in MSR TS bits from user context */
-	regs->msr |= msr & MSR_TS_MASK;
+	regs_set_return_msr(regs, regs->msr | (msr & MSR_TS_MASK));
 
 	/*
 	 * Ensure that TM is enabled in regs->msr before we leave the signal
@@ -583,7 +583,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
 	 * to be de-scheduled with MSR[TS] set but without calling
 	 * tm_recheckpoint(). This can cause a bug.
 	 */
-	regs->msr |= MSR_TM;
+	regs_set_return_msr(regs, regs->msr | MSR_TM);
 
 	/* This loads the checkpointed FP/VEC state, if used */
 	tm_recheckpoint(&tsk->thread);
@@ -591,11 +591,11 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
 	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
 	if (msr & MSR_FP) {
 		load_fp_state(&tsk->thread.fp_state);
-		regs->msr |= (MSR_FP | tsk->thread.fpexc_mode);
+		regs_set_return_msr(regs, regs->msr | (MSR_FP | tsk->thread.fpexc_mode));
 	}
 	if (msr & MSR_VEC) {
 		load_vr_state(&tsk->thread.vr_state);
-		regs->msr |= MSR_VEC;
+		regs_set_return_msr(regs, regs->msr | MSR_VEC);
 	}
 	preempt_enable();
 
@@ -717,6 +717,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
 
 	/* This returns like rt_sigreturn */
 	set_thread_flag(TIF_RESTOREALL);
+
 	return 0;
 
 efault_out:
@@ -783,7 +784,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
 		 * the MSR[TS] that came from user context later, at
 		 * restore_tm_sigcontexts.
 		 */
-		regs->msr &= ~MSR_TS_MASK;
+		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
 
 		if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
 			goto badframe;
@@ -815,7 +816,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
 		 * MSR[TS] set, but without CPU in the proper state,
 		 * causing a TM bad thing.
 		 */
-		current->thread.regs->msr &= ~MSR_TS_MASK;
+		regs_set_return_msr(current->thread.regs,
+				current->thread.regs->msr & ~MSR_TS_MASK);
 
 		if (!user_read_access_begin(&uc->uc_mcontext, sizeof(uc->uc_mcontext)))
 			goto badframe;
@@ -829,6 +831,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
 		goto badframe;
 
 	set_thread_flag(TIF_RESTOREALL);
+
 	return 0;
 
 badframe_block:
@@ -908,12 +911,12 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
 	/* Set up to return from userspace. */
 	if (tsk->mm->context.vdso) {
-		regs->nip = VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64);
+		regs_set_return_ip(regs, VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64));
 	} else {
 		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
 		if (err)
 			goto badframe;
-		regs->nip = (unsigned long) &frame->tramp[0];
+		regs_set_return_ip(regs, (unsigned long) &frame->tramp[0]);
 	}
 
 	/* Allocate a dummy caller frame for the signal handler. */
@@ -938,8 +941,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
 	}
 
 	/* enter the signal handler in native-endian mode */
-	regs->msr &= ~MSR_LE;
-	regs->msr |= (MSR_KERNEL & MSR_LE);
+	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
 	regs->gpr[1] = newsp;
 	regs->gpr[3] = ksig->sig;
 	regs->result = 0;
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index a552c9e68d7e..bf4ae0f0e36c 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -114,7 +114,8 @@ SYSCALL_DEFINE0(switch_endian)
 {
 	struct thread_info *ti;
 
-	current->thread.regs->msr ^= MSR_LE;
+	regs_set_return_msr(current->thread.regs,
+				current->thread.regs->msr ^ MSR_LE);
 
 	/*
 	 * Set TIF_RESTOREALL so that r3 isn't clobbered on return to
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index c929d93c35d0..dfbce527c98e 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -428,7 +428,7 @@ void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
 	return;
 
 nonrecoverable:
-	regs->msr &= ~MSR_RI;
+	regs_set_return_msr(regs, regs->msr & ~MSR_RI);
 #endif
 }
 DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
@@ -550,8 +550,8 @@ static inline int check_io_access(struct pt_regs *regs)
 			printk(KERN_DEBUG "%s bad port %lx at %p\n",
 			       (*nip & 0x100)? "OUT to": "IN from",
 			       regs->gpr[rb] - _IO_BASE, nip);
-			regs->msr |= MSR_RI;
-			regs->nip = extable_fixup(entry);
+			regs_set_return_msr(regs, regs->msr | MSR_RI);
+			regs_set_return_ip(regs, extable_fixup(entry));
 			return 1;
 		}
 	}
@@ -587,8 +587,8 @@ static inline int check_io_access(struct pt_regs *regs)
 #define REASON_BOUNDARY		SRR1_BOUNDARY
 
 #define single_stepping(regs)	((regs)->msr & MSR_SE)
-#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
-#define clear_br_trace(regs)	((regs)->msr &= ~MSR_BE)
+#define clear_single_step(regs)	(regs_set_return_msr((regs), (regs)->msr & ~MSR_SE))
+#define clear_br_trace(regs)	(regs_set_return_msr((regs), (regs)->msr & ~MSR_BE))
 #endif
 
 #define inst_length(reason)	(((reason) & REASON_PREFIXED) ? 8 : 4)
@@ -1032,7 +1032,7 @@ static void p9_hmi_special_emu(struct pt_regs *regs)
 #endif /* !__LITTLE_ENDIAN__ */
 
 	/* Go to next instruction */
-	regs->nip += 4;
+	regs_add_return_ip(regs, 4);
 }
 #endif /* CONFIG_VSX */
 
@@ -1477,7 +1477,7 @@ static void do_program_check(struct pt_regs *regs)
 
 		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
 		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
-			regs->nip += 4;
+			regs_add_return_ip(regs, 4);
 			return;
 		}
 		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
@@ -1539,7 +1539,7 @@ static void do_program_check(struct pt_regs *regs)
 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
 		switch (emulate_instruction(regs)) {
 		case 0:
-			regs->nip += 4;
+			regs_add_return_ip(regs, 4);
 			emulate_single_step(regs);
 			return;
 		case -EFAULT:
@@ -1567,7 +1567,7 @@ DEFINE_INTERRUPT_HANDLER(program_check_exception)
  */
 DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt)
 {
-	regs->msr |= REASON_ILLEGAL;
+	regs_set_return_msr(regs, regs->msr | REASON_ILLEGAL);
 	do_program_check(regs);
 }
 
@@ -1594,7 +1594,7 @@ DEFINE_INTERRUPT_HANDLER(alignment_exception)
 	if (fixed == 1) {
 		/* skip over emulated instruction */
-		regs->nip += inst_length(reason);
+		regs_add_return_ip(regs, inst_length(reason));
 		emulate_single_step(regs);
 		return;
 	}
@@ -1660,7 +1660,7 @@ static void tm_unavailable(struct pt_regs *regs)
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (user_mode(regs)) {
 		current->thread.load_tm++;
-		regs->msr |= MSR_TM;
+		regs_set_return_msr(regs, regs->msr | MSR_TM);
 		tm_enable();
 		tm_restore_sprs(&current->thread);
 		return;
@@ -1752,7 +1752,7 @@ DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception)
 				pr_err("DSCR based mfspr emulation failed\n");
 				return;
 			}
-			regs->nip += 4;
+			regs_add_return_ip(regs, 4);
 			emulate_single_step(regs);
 		}
 		return;
@@ -1949,7 +1949,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
 	 */
 	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
 			       current->thread.debug.dbcr1))
-		regs->msr |= MSR_DE;
+		regs_set_return_msr(regs, regs->msr | MSR_DE);
 	else
 		/* Make sure the IDM flag is off */
 		current->thread.debug.dbcr0 &= ~DBCR0_IDM;
@@ -1970,7 +1970,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
 	 * instead of stopping here when hitting a BT
 	 */
 	if (debug_status & DBSR_BT) {
-		regs->msr &= ~MSR_DE;
+		regs_set_return_msr(regs, regs->msr & ~MSR_DE);
 
 		/* Disable BT */
 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
@@ -1981,7 +1981,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
 		if (user_mode(regs)) {
 			current->thread.debug.dbcr0 &= ~DBCR0_BT;
 			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
-			regs->msr |= MSR_DE;
+			regs_set_return_msr(regs, regs->msr | MSR_DE);
 			return;
 		}
 
@@ -1995,7 +1995,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
 		if (debugger_sstep(regs))
 			return;
 	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
-		regs->msr &= ~MSR_DE;
+		regs_set_return_msr(regs, regs->msr & ~MSR_DE);
 
 		/* Disable instruction completion */
 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
@@ -2017,7 +2017,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
 			current->thread.debug.dbcr0 &= ~DBCR0_IC;
 			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
 					       current->thread.debug.dbcr1))
-				regs->msr |= MSR_DE;
+				regs_set_return_msr(regs, regs->msr | MSR_DE);
 			else
 				/* Make sure the IDM bit is off */
 				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
@@ -2045,7 +2045,7 @@ DEFINE_INTERRUPT_HANDLER(altivec_assist_exception)
 		PPC_WARN_EMULATED(altivec, regs);
 		err = emulate_altivec(regs);
 		if (err == 0) {
-			regs->nip += 4;		/* skip emulated instruction */
+			regs_add_return_ip(regs, 4); /* skip emulated instruction */
 			emulate_single_step(regs);
 			return;
 		}
@@ -2110,7 +2110,7 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException)
 
 	err = do_spe_mathemu(regs);
 	if (err == 0) {
-		regs->nip += 4;		/* skip emulated instruction */
+		regs_add_return_ip(regs, 4); /* skip emulated instruction */
 		emulate_single_step(regs);
 		return;
 	}
@@ -2141,10 +2141,10 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
 	giveup_spe(current);
 	preempt_enable();
 
-	regs->nip -= 4;
+	regs_add_return_ip(regs, -4);
 	err = speround_handler(regs);
 	if (err == 0) {
-		regs->nip += 4;		/* skip emulated instruction */
+		regs_add_return_ip(regs, 4); /* skip emulated instruction */
 		emulate_single_step(regs);
 		return;
 	}
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index 1aefd2ecbb8a..c6975467d9ff 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -62,7 +62,7 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 	autask->saved_trap_nr = current->thread.trap_nr;
 	current->thread.trap_nr = UPROBE_TRAP_NR;
-	regs->nip = current->utask->xol_vaddr;
+	regs_set_return_ip(regs, current->utask->xol_vaddr);
 
 	user_enable_single_step(current);
 	return 0;
@@ -119,7 +119,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 	 * support doesn't exist and have to fix-up the next instruction
 	 * to be executed.
 	 */
-	regs->nip = (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn);
+	regs_set_return_ip(regs, (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn));
 
 	user_disable_single_step(current);
 	return 0;
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 54dbefcb4cde..fc120fac1910 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -73,6 +73,10 @@ _GLOBAL(load_up_altivec)
 	addi	r5,r4,THREAD		/* Get THREAD */
 	oris	r12,r12,MSR_VEC@h
 	std	r12,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+#endif
 #endif
 	li	r4,1
 	stb	r4,THREAD_LOAD_VEC(r5)
@@ -131,6 +135,8 @@ _GLOBAL(load_up_vsx)
 	/* enable use of VSX after return */
 	oris	r12,r12,MSR_VSX@h
 	std	r12,_MSR(r1)
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
 	b	fast_interrupt_return_srr
 
 #endif /* CONFIG_VSX */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 279eae8f9dbc..010fa75518ae 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4626,6 +4626,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&kvm->arch.vcpus_running);
+
+	srr_regs_clobbered();
+
 	return r;
 }
 
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d7733b07f489..1ed5ceee73eb 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <asm/interrupt.h>
 #include
 #include
 #include
@@ -1848,6 +1849,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 	/* Make sure we save the guest TAR/EBB/DSCR state */
 	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
 
+	srr_regs_clobbered();
 out:
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	return ret;
diff --git a/arch/powerpc/lib/error-inject.c b/arch/powerpc/lib/error-inject.c
index 407b992fb02f..e834079d2b5c 100644
--- a/arch/powerpc/lib/error-inject.c
+++ b/arch/powerpc/lib/error-inject.c
@@ -11,6 +11,6 @@ void override_function_with_return(struct pt_regs *regs)
 	 * function in the kernel/module, captured on a kprobe. We don't need
 	 * to worry about 32-bit userspace on a 64-bit kernel.
 	 */
-	regs->nip = regs->link;
+	regs_set_return_ip(regs, regs->link);
 }
 NOKPROBE_SYMBOL(override_function_with_return);
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index aee42bcc775b..d8d5f901cee1 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -3225,7 +3225,7 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
 	default:
 		WARN_ON_ONCE(1);
 	}
-	regs->nip = next_pc;
+	regs_set_return_ip(regs, next_pc);
 }
 NOKPROBE_SYMBOL(emulate_update_regs);
 
@@ -3563,7 +3563,7 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
 			/* can't step mtmsr[d] that would clear MSR_RI */
 			return -1;
 		/* here op.val is the mask of bits to change */
-		regs->msr = (regs->msr & ~op.val) | (val & op.val);
+		regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
 		goto instr_done;
 
 #ifdef CONFIG_PPC64
@@ -3576,7 +3576,7 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
 		if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
 				cpu_has_feature(CPU_FTR_REAL_LE) &&
 				regs->gpr[0] == 0x1ebe) {
-			regs->msr ^= MSR_LE;
+			regs_set_return_msr(regs, regs->msr ^ MSR_LE);
 			goto instr_done;
 		}
 		regs->gpr[9] = regs->gpr[13];
@@ -3584,8 +3584,8 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
 		regs->gpr[11] = regs->nip + 4;
 		regs->gpr[12] = regs->msr & MSR_MASK;
 		regs->gpr[13] = (unsigned long) get_paca();
-		regs->nip = (unsigned long) &system_call_common;
-		regs->msr = MSR_KERNEL;
+		regs_set_return_ip(regs, (unsigned long) &system_call_common);
+		regs_set_return_msr(regs, MSR_KERNEL);
 		return 1;
 
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -3595,8 +3595,8 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
 		regs->gpr[11] = regs->nip + 4;
 		regs->gpr[12] = regs->msr & MSR_MASK;
 		regs->gpr[13] = (unsigned long) get_paca();
-		regs->nip = (unsigned long) &system_call_vectored_emulate;
-		regs->msr = MSR_KERNEL;
+		regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate);
+		regs_set_return_msr(regs, MSR_KERNEL);
 		return 1;
 #endif
 
@@ -3607,7 +3607,8 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
 	return 0;
 
  instr_done:
-	regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type));
+	regs_set_return_ip(regs,
+		truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
 	return 1;
 }
 NOKPROBE_SYMBOL(emulate_step);
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
index 1cbfa468813b..8b4f6b3e96c4 100644
--- a/arch/powerpc/lib/test_emulate_step.c
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -1609,6 +1609,7 @@ static int __init emulate_compute_instr(struct pt_regs *regs,
 	if (!regs || !ppc_inst_val(instr))
 		return -EINVAL;
 
+	/* This is not a return frame regs */
 	regs->nip = patch_site_addr(&patch__exec_instr);
 
 	analysed = analyse_instr(&op, regs, instr);
diff --git a/arch/powerpc/math-emu/math.c b/arch/powerpc/math-emu/math.c
index 327165f26ca6..36761bd00f38 100644
--- a/arch/powerpc/math-emu/math.c
+++ b/arch/powerpc/math-emu/math.c
@@ -453,7 +453,7 @@ do_mathemu(struct pt_regs *regs)
 		break;
 	}
 
-	regs->nip += 4;
+	regs_add_return_ip(regs, 4);
 	return 0;
 
 illegal:
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c
index 0a05e51964c1..39b84e7452e1 100644
--- a/arch/powerpc/math-emu/math_efp.c
+++ b/arch/powerpc/math-emu/math_efp.c
@@ -710,7 +710,7 @@ update_regs:
 illegal:
 	if (have_e500_cpu_a005_erratum) {
 		/* according to e500 cpu a005 erratum, reissue efp inst */
-		regs->nip -= 4;
+		regs_add_return_ip(regs, -4);
 		pr_debug("re-issue efp inst: %08lx\n", speinsn);
 		return 0;
 	}
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 53065d564161..85521b3e7098 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -251,8 +251,8 @@ static int ppc750_machine_check_exception(struct pt_regs *regs)
 	/* Are we prepared to handle this fault */
 	if ((entry = search_exception_tables(regs->nip)) != NULL) {
 		tsi108_clear_pci_cfg_error();
-		regs->msr |= MSR_RI;
-		regs->nip = extable_fixup(entry);
+		regs_set_return_msr(regs, regs->msr | MSR_RI);
+		regs_set_return_ip(regs, extable_fixup(entry));
 		return 1;
 	}
 	return 0;
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index 5565647dc879..d8da6a483e59 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -173,8 +173,8 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs)
 	/* Are we prepared to handle this fault */
 	if ((entry = search_exception_tables(regs->nip)) != NULL) {
 		tsi108_clear_pci_cfg_error();
-		regs->msr |= MSR_RI;
-		regs->nip = extable_fixup(entry);
+		regs_set_return_msr(regs, regs->msr | MSR_RI);
+		regs_set_return_ip(regs, extable_fixup(entry));
 		return 1;
 	}
 	return 0;
diff --git a/arch/powerpc/platforms/pasemi/idle.c b/arch/powerpc/platforms/pasemi/idle.c
index 1c954c90b0f4..9b88e3cded7d 100644
--- a/arch/powerpc/platforms/pasemi/idle.c
+++ b/arch/powerpc/platforms/pasemi/idle.c
@@ -37,7 +37,7 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
 	 */
 
 	if (regs->msr & SRR1_WAKEMASK)
-		regs->nip = regs->link;
+		regs_set_return_ip(regs, regs->link);
 
 	switch (regs->msr & SRR1_WAKEMASK) {
 	case SRR1_WAKEDEC:
@@ -58,7 +58,7 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
 	restore_astate(hard_smp_processor_id());
 
 	/* everything handled */
-	regs->msr |= MSR_RI;
+	regs_set_return_msr(regs, regs->msr | MSR_RI);
 	return 1;
 }
 
diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c
index 01401e3da7ca..f812c74c61e5 100644
--- a/arch/powerpc/platforms/powernv/opal-call.c
+++ b/arch/powerpc/platforms/powernv/opal-call.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include
 #include
+#include <asm/interrupt.h>
 #include
 #include
 #include
@@ -100,6 +101,9 @@ static int64_t opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
 	bool mmu = (msr & (MSR_IR|MSR_DR));
 	int64_t ret;
 
+	/* OPAL call / firmware may use SRR and/or HSRR */
+	srr_regs_clobbered();
+
 	msr &= ~MSR_EE;
 
 	if (unlikely(!mmu))
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 303d7c775740..2835376e61a4 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -773,7 +773,7 @@ bool opal_mce_check_early_recovery(struct pt_regs *regs)
 	 * Setup regs->nip to rfi into fixup address.
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 303d7c775740..2835376e61a4 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -773,7 +773,7 @@ bool opal_mce_check_early_recovery(struct pt_regs *regs)
 	 * Setup regs->nip to rfi into fixup address.
 	 */
 	if (recover_addr)
-		regs->nip = recover_addr;
+		regs_set_return_ip(regs, recover_addr);
 
 out:
 	return !!recover_addr;
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 8a2b8d64265b..ab9fc6506861 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -108,6 +108,10 @@ _GLOBAL_TOC(plpar_hcall_norets_notrace)
 	mfcr	r0
 	stw	r0,8(r1)
 	HVSC				/* invoke the hypervisor */
+
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
 	blr				/* return r3 = status */
@@ -120,6 +124,9 @@ _GLOBAL_TOC(plpar_hcall_norets)
 	HCALL_BRANCH(plpar_hcall_norets_trace)
 	HVSC				/* invoke the hypervisor */
 
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
 	blr				/* return r3 = status */
@@ -129,6 +136,10 @@ plpar_hcall_norets_trace:
 	HCALL_INST_PRECALL(R4)
 	HVSC
 	HCALL_INST_POSTCALL_NORETS
+
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
 	blr
@@ -159,6 +170,9 @@ _GLOBAL_TOC(plpar_hcall)
 	std	r6, 16(r12)
 	std	r7, 24(r12)
 
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
 
@@ -188,6 +202,9 @@ plpar_hcall_trace:
 
 	HCALL_INST_POSTCALL(r12)
 
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
 
@@ -223,6 +240,9 @@ _GLOBAL(plpar_hcall_raw)
 	std	r6, 16(r12)
 	std	r7, 24(r12)
 
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
 
@@ -262,6 +282,9 @@ _GLOBAL_TOC(plpar_hcall9)
 	std	r11,56(r12)
 	std	r0, 64(r12)
 
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
 
@@ -300,6 +323,9 @@ plpar_hcall9_trace:
 
 	HCALL_INST_POSTCALL(r12)
 
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
 
@@ -339,6 +365,9 @@ _GLOBAL(plpar_hcall9_raw)
 	std	r11,56(r12)
 	std	r0, 64(r12)
 
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
 
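Every HVSC site above gains the same two instructions. On Book3S-64, r13 holds
the paca pointer and PACASRR_VALID is the asm-offsets.c constant for the
srr_valid byte, so the pair is just the assembly spelling of a one-byte C
store; roughly:

	/* the hypervisor call clobbers SRR0/SRR1 */
	local_paca->srr_valid = 0;

Only the SRR flag is cleared here, presumably because HSRR0/1 are
hypervisor-privileged and a pseries guest has no cached HSRR state to
invalidate.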
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 2f636308cf60..167f2e1b8d39 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -487,8 +487,8 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
 	if ((be64_to_cpu(regs->msr) &
 			(MSR_LE|MSR_RI|MSR_DR|MSR_IR|MSR_ME|MSR_PR|
 			 MSR_ILE|MSR_HV|MSR_SF)) == (MSR_DR|MSR_SF)) {
-		regs->nip = be64_to_cpu((__be64)regs->nip);
-		regs->msr = 0;
+		regs_set_return_ip(regs, be64_to_cpu((__be64)regs->nip));
+		regs_set_return_msr(regs, 0);
 	}
 #endif
 
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 69af73765783..b8f76f3fd994 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -1072,7 +1072,7 @@ int fsl_pci_mcheck_exception(struct pt_regs *regs)
 		ret = get_kernel_nofault(inst, (void *)regs->nip);
 
 		if (!ret && mcheck_handle_load(regs, inst)) {
-			regs->nip += 4;
+			regs_add_return_ip(regs, 4);
 			return 1;
 		}
 	}
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 07c164f7f8cf..5a95b8ea23d8 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -108,8 +108,8 @@ int fsl_rio_mcheck_exception(struct pt_regs *regs)
 				 __func__);
 			out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
 				 0);
-			regs->msr |= MSR_RI;
-			regs->nip = extable_fixup(entry);
+			regs_set_return_msr(regs, regs->msr | MSR_RI);
+			regs_set_return_ip(regs, extable_fixup(entry));
 			return 1;
 		}
 	}
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index bd7ee794be92..dbc38483473c 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -514,7 +514,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 
 		bp = in_breakpoint_table(regs->nip, &offset);
 		if (bp != NULL) {
-			regs->nip = bp->address + offset;
+			regs_set_return_ip(regs, bp->address + offset);
 			atomic_dec(&bp->ref_count);
 		}
 
@@ -702,7 +702,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 	if (regs->msr & MSR_DE) {
 		bp = at_breakpoint(regs->nip);
 		if (bp != NULL) {
-			regs->nip = (unsigned long) &bp->instr[0];
+			regs_set_return_ip(regs, (unsigned long) &bp->instr[0]);
 			atomic_inc(&bp->ref_count);
 		}
 	}
@@ -712,7 +712,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 		if (bp != NULL) {
 			int stepped = emulate_step(regs, ppc_inst_read(bp->instr));
 			if (stepped == 0) {
-				regs->nip = (unsigned long) &bp->instr[0];
+				regs_set_return_ip(regs, (unsigned long) &bp->instr[0]);
 				atomic_inc(&bp->ref_count);
 			} else if (stepped < 0) {
 				printf("Couldn't single-step %s instruction\n",
@@ -766,7 +766,7 @@ static int xmon_bpt(struct pt_regs *regs)
 	/* Are we at the trap at bp->instr[1] for some bp? */
 	bp = in_breakpoint_table(regs->nip, &offset);
 	if (bp != NULL && (offset == 4 || offset == 8)) {
-		regs->nip = bp->address + offset;
+		regs_set_return_ip(regs, bp->address + offset);
 		atomic_dec(&bp->ref_count);
 		return 1;
 	}
@@ -836,7 +836,7 @@ static int xmon_fault_handler(struct pt_regs *regs)
 	if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
 		bp = in_breakpoint_table(regs->nip, &offset);
 		if (bp != NULL) {
-			regs->nip = bp->address + offset;
+			regs_set_return_ip(regs, bp->address + offset);
 			atomic_dec(&bp->ref_count);
 		}
 	}
@@ -1188,7 +1188,7 @@ cmds(struct pt_regs *excp)
 #ifdef CONFIG_BOOKE
 static int do_step(struct pt_regs *regs)
 {
-	regs->msr |= MSR_DE;
+	regs_set_return_msr(regs, regs->msr | MSR_DE);
 	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
 	return 1;
 }
@@ -1221,7 +1221,7 @@ static int do_step(struct pt_regs *regs)
 			}
 		}
 	}
-	regs->msr |= MSR_SE;
+	regs_set_return_msr(regs, regs->msr | MSR_SE);
 	return 1;
 }
 #endif
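The xmon and do_step() conversions close out the last direct writers touched
by this patch. The failure mode they guard against is easy to state in C; a
hypothetical illustration (not code from this patch):

	/*
	 * BAD once the fast path exists: the direct store leaves
	 * srr_valid set, so the interrupt exit may skip the mtspr of
	 * SRR0 and rfid returns to the stale address rather than the
	 * breakpoint fixup:
	 *
	 *	regs->nip = bp->address + offset;
	 *
	 * GOOD: the accessor updates nip and invalidates the cached
	 * SRR state, forcing the exit path to reload the registers:
	 */
	regs_set_return_ip(regs, bp->address + offset);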