mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-26 05:34:13 +08:00
powerpc/book3s64/pkeys: Store/restore userspace AMR/IAMR correctly on entry and exit from kernel
This prepares the kernel to operate with a different value than the userspace AMR/IAMR. For this, AMR/IAMR need to be saved and restored on entry and return from the kernel. With KUAP we modify the kernel AMR when accessing a user address from the kernel via the copy_to/from_user interfaces. We don't need to modify the IAMR value in a similar fashion. If MMU_FTR_PKEY is enabled we need to save AMR/IAMR in pt_regs on entering the kernel from userspace. If not, we can assume that AMR/IAMR is not modified from userspace. We need to save AMR if we have the MMU_FTR_BOOK3S_KUAP feature enabled and we are interrupted within the kernel. This is required so that if we get interrupted within copy_to/from_user we continue with the right AMR value. If we have MMU_FTR_BOOK3S_KUEP enabled we need to restore IAMR on return to userspace because the kernel will be running with a different IAMR value. Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Reviewed-by: Sandipan Das <sandipan@linux.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/20201127044424.40686-11-aneesh.kumar@linux.ibm.com
This commit is contained in:
parent
d7df77e890
commit
8e560921b5
@ -13,17 +13,46 @@
|
|||||||
|
|
||||||
#ifdef __ASSEMBLY__
|
#ifdef __ASSEMBLY__
|
||||||
|
|
||||||
.macro kuap_restore_amr gpr1, gpr2
|
.macro kuap_user_restore gpr1
|
||||||
#ifdef CONFIG_PPC_KUAP
|
#if defined(CONFIG_PPC_PKEY)
|
||||||
BEGIN_MMU_FTR_SECTION_NESTED(67)
|
BEGIN_MMU_FTR_SECTION_NESTED(67)
|
||||||
mfspr \gpr1, SPRN_AMR
|
/*
|
||||||
|
* AMR and IAMR are going to be different when
|
||||||
|
* returning to userspace.
|
||||||
|
*/
|
||||||
|
ld \gpr1, STACK_REGS_AMR(r1)
|
||||||
|
isync
|
||||||
|
mtspr SPRN_AMR, \gpr1
|
||||||
|
/*
|
||||||
|
* Restore IAMR only when returning to userspace
|
||||||
|
*/
|
||||||
|
ld \gpr1, STACK_REGS_IAMR(r1)
|
||||||
|
mtspr SPRN_IAMR, \gpr1
|
||||||
|
|
||||||
|
/* No isync required, see kuap_user_restore() */
|
||||||
|
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_PKEY, 67)
|
||||||
|
#endif
|
||||||
|
.endm
|
||||||
|
|
||||||
|
.macro kuap_kernel_restore gpr1, gpr2
|
||||||
|
#if defined(CONFIG_PPC_PKEY)
|
||||||
|
|
||||||
|
BEGIN_MMU_FTR_SECTION_NESTED(67)
|
||||||
|
/*
|
||||||
|
* AMR is going to be mostly the same since we are
|
||||||
|
* returning to the kernel. Compare and do a mtspr.
|
||||||
|
*/
|
||||||
ld \gpr2, STACK_REGS_AMR(r1)
|
ld \gpr2, STACK_REGS_AMR(r1)
|
||||||
|
mfspr \gpr1, SPRN_AMR
|
||||||
cmpd \gpr1, \gpr2
|
cmpd \gpr1, \gpr2
|
||||||
beq 998f
|
beq 100f
|
||||||
isync
|
isync
|
||||||
mtspr SPRN_AMR, \gpr2
|
mtspr SPRN_AMR, \gpr2
|
||||||
/* No isync required, see kuap_restore_amr() */
|
/*
|
||||||
998:
|
* No isync required, see kuap_restore_amr()
|
||||||
|
* No need to restore IAMR when returning to kernel space.
|
||||||
|
*/
|
||||||
|
100:
|
||||||
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
|
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
|
||||||
#endif
|
#endif
|
||||||
.endm
|
.endm
|
||||||
@ -42,23 +71,98 @@
|
|||||||
.endm
|
.endm
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* if (pkey) {
|
||||||
|
*
|
||||||
|
* save AMR -> stack;
|
||||||
|
* if (kuap) {
|
||||||
|
* if (AMR != BLOCKED)
|
||||||
|
* KUAP_BLOCKED -> AMR;
|
||||||
|
* }
|
||||||
|
* if (from_user) {
|
||||||
|
* save IAMR -> stack;
|
||||||
|
* if (kuep) {
|
||||||
|
* KUEP_BLOCKED ->IAMR
|
||||||
|
* }
|
||||||
|
* }
|
||||||
|
* return;
|
||||||
|
* }
|
||||||
|
*
|
||||||
|
* if (kuap) {
|
||||||
|
* if (from_kernel) {
|
||||||
|
* save AMR -> stack;
|
||||||
|
* if (AMR != BLOCKED)
|
||||||
|
* KUAP_BLOCKED -> AMR;
|
||||||
|
* }
|
||||||
|
*
|
||||||
|
* }
|
||||||
|
*/
|
||||||
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
|
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
|
||||||
#ifdef CONFIG_PPC_KUAP
|
#if defined(CONFIG_PPC_PKEY)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* if both pkey and kuap is disabled, nothing to do
|
||||||
|
*/
|
||||||
|
BEGIN_MMU_FTR_SECTION_NESTED(68)
|
||||||
|
b 100f // skip_save_amr
|
||||||
|
END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* if pkey is disabled and we are entering from userspace
|
||||||
|
* don't do anything.
|
||||||
|
*/
|
||||||
BEGIN_MMU_FTR_SECTION_NESTED(67)
|
BEGIN_MMU_FTR_SECTION_NESTED(67)
|
||||||
.ifnb \msr_pr_cr
|
.ifnb \msr_pr_cr
|
||||||
bne \msr_pr_cr, 99f
|
/*
|
||||||
|
* Without pkey we are not changing AMR outside the kernel
|
||||||
|
* hence skip this completely.
|
||||||
|
*/
|
||||||
|
bne \msr_pr_cr, 100f // from userspace
|
||||||
.endif
|
.endif
|
||||||
|
END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* pkey is enabled or pkey is disabled but entering from kernel
|
||||||
|
*/
|
||||||
mfspr \gpr1, SPRN_AMR
|
mfspr \gpr1, SPRN_AMR
|
||||||
std \gpr1, STACK_REGS_AMR(r1)
|
std \gpr1, STACK_REGS_AMR(r1)
|
||||||
li \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
|
|
||||||
sldi \gpr2, \gpr2, AMR_KUAP_SHIFT
|
/*
|
||||||
|
* update kernel AMR with AMR_KUAP_BLOCKED only
|
||||||
|
* if KUAP feature is enabled
|
||||||
|
*/
|
||||||
|
BEGIN_MMU_FTR_SECTION_NESTED(69)
|
||||||
|
LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
|
||||||
cmpd \use_cr, \gpr1, \gpr2
|
cmpd \use_cr, \gpr1, \gpr2
|
||||||
beq \use_cr, 99f
|
beq \use_cr, 102f
|
||||||
// We don't isync here because we very recently entered via rfid
|
/*
|
||||||
|
* We don't isync here because we very recently entered via an interrupt
|
||||||
|
*/
|
||||||
mtspr SPRN_AMR, \gpr2
|
mtspr SPRN_AMR, \gpr2
|
||||||
isync
|
isync
|
||||||
99:
|
102:
|
||||||
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
|
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* if entering from kernel we don't need save IAMR
|
||||||
|
*/
|
||||||
|
.ifnb \msr_pr_cr
|
||||||
|
beq \msr_pr_cr, 100f // from kernel space
|
||||||
|
mfspr \gpr1, SPRN_IAMR
|
||||||
|
std \gpr1, STACK_REGS_IAMR(r1)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* update kernel IAMR with AMR_KUEP_BLOCKED only
|
||||||
|
* if KUEP feature is enabled
|
||||||
|
*/
|
||||||
|
BEGIN_MMU_FTR_SECTION_NESTED(70)
|
||||||
|
LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
|
||||||
|
mtspr SPRN_IAMR, \gpr2
|
||||||
|
isync
|
||||||
|
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
|
||||||
|
.endif
|
||||||
|
|
||||||
|
100: // skip_save_amr
|
||||||
#endif
|
#endif
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
@ -68,22 +172,42 @@
|
|||||||
|
|
||||||
DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
|
DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
|
||||||
|
|
||||||
#ifdef CONFIG_PPC_KUAP
|
#ifdef CONFIG_PPC_PKEY
|
||||||
|
|
||||||
#include <asm/mmu.h>
|
#include <asm/mmu.h>
|
||||||
#include <asm/ptrace.h>
|
#include <asm/ptrace.h>
|
||||||
|
|
||||||
static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
|
static inline void kuap_user_restore(struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP) && unlikely(regs->kuap != amr)) {
|
if (!mmu_has_feature(MMU_FTR_PKEY))
|
||||||
isync();
|
return;
|
||||||
mtspr(SPRN_AMR, regs->kuap);
|
|
||||||
/*
|
isync();
|
||||||
* No isync required here because we are about to RFI back to
|
mtspr(SPRN_AMR, regs->amr);
|
||||||
* previous context before any user accesses would be made,
|
mtspr(SPRN_IAMR, regs->iamr);
|
||||||
* which is a CSI.
|
/*
|
||||||
*/
|
* No isync required here because we are about to rfi
|
||||||
|
* back to previous context before any user accesses
|
||||||
|
* would be made, which is a CSI.
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
static inline void kuap_kernel_restore(struct pt_regs *regs,
|
||||||
|
unsigned long amr)
|
||||||
|
{
|
||||||
|
if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
|
||||||
|
if (unlikely(regs->amr != amr)) {
|
||||||
|
isync();
|
||||||
|
mtspr(SPRN_AMR, regs->amr);
|
||||||
|
/*
|
||||||
|
* No isync required here because we are about to rfi
|
||||||
|
* back to previous context before any user accesses
|
||||||
|
* would be made, which is a CSI.
|
||||||
|
*/
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
/*
|
||||||
|
* No need to restore IAMR when returning to kernel space.
|
||||||
|
*/
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline unsigned long kuap_get_and_check_amr(void)
|
static inline unsigned long kuap_get_and_check_amr(void)
|
||||||
@ -97,6 +221,26 @@ static inline unsigned long kuap_get_and_check_amr(void)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#else /* CONFIG_PPC_PKEY */
|
||||||
|
|
||||||
|
static inline void kuap_user_restore(struct pt_regs *regs)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline unsigned long kuap_get_and_check_amr(void)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif /* CONFIG_PPC_PKEY */
|
||||||
|
|
||||||
|
|
||||||
|
#ifdef CONFIG_PPC_KUAP
|
||||||
|
|
||||||
static inline void kuap_check_amr(void)
|
static inline void kuap_check_amr(void)
|
||||||
{
|
{
|
||||||
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
|
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
|
||||||
@ -145,21 +289,6 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
|
|||||||
(regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
|
(regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
|
||||||
"Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
|
"Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
|
||||||
}
|
}
|
||||||
#else /* CONFIG_PPC_KUAP */
|
|
||||||
static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { }
|
|
||||||
|
|
||||||
static inline unsigned long kuap_get_and_check_amr(void)
|
|
||||||
{
|
|
||||||
return 0UL;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline unsigned long get_kuap(void)
|
|
||||||
{
|
|
||||||
return AMR_KUAP_BLOCKED;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void set_kuap(unsigned long value) { }
|
|
||||||
#endif /* !CONFIG_PPC_KUAP */
|
|
||||||
|
|
||||||
static __always_inline void allow_user_access(void __user *to, const void __user *from,
|
static __always_inline void allow_user_access(void __user *to, const void __user *from,
|
||||||
unsigned long size, unsigned long dir)
|
unsigned long size, unsigned long dir)
|
||||||
@ -176,6 +305,21 @@ static __always_inline void allow_user_access(void __user *to, const void __user
|
|||||||
BUILD_BUG();
|
BUILD_BUG();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#else /* CONFIG_PPC_KUAP */
|
||||||
|
|
||||||
|
static inline unsigned long get_kuap(void)
|
||||||
|
{
|
||||||
|
return AMR_KUAP_BLOCKED;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void set_kuap(unsigned long value) { }
|
||||||
|
|
||||||
|
static __always_inline void allow_user_access(void __user *to, const void __user *from,
|
||||||
|
unsigned long size, unsigned long dir)
|
||||||
|
{ }
|
||||||
|
|
||||||
|
#endif /* !CONFIG_PPC_KUAP */
|
||||||
|
|
||||||
static inline void prevent_user_access(void __user *to, const void __user *from,
|
static inline void prevent_user_access(void __user *to, const void __user *from,
|
||||||
unsigned long size, unsigned long dir)
|
unsigned long size, unsigned long dir)
|
||||||
{
|
{
|
||||||
|
@ -61,8 +61,11 @@ struct pt_regs
|
|||||||
unsigned long amr;
|
unsigned long amr;
|
||||||
#endif
|
#endif
|
||||||
};
|
};
|
||||||
|
#ifdef CONFIG_PPC_PKEY
|
||||||
|
unsigned long iamr;
|
||||||
|
#endif
|
||||||
};
|
};
|
||||||
unsigned long __pad[2]; /* Maintain 16 byte interrupt stack alignment */
|
unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
#endif
|
#endif
|
||||||
|
@ -358,11 +358,13 @@ int main(void)
|
|||||||
|
|
||||||
#ifdef CONFIG_PPC_PKEY
|
#ifdef CONFIG_PPC_PKEY
|
||||||
STACK_PT_REGS_OFFSET(STACK_REGS_AMR, amr);
|
STACK_PT_REGS_OFFSET(STACK_REGS_AMR, amr);
|
||||||
|
STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
|
||||||
#endif
|
#endif
|
||||||
#ifdef CONFIG_PPC_KUAP
|
#ifdef CONFIG_PPC_KUAP
|
||||||
STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
|
STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|
||||||
#if defined(CONFIG_PPC32)
|
#if defined(CONFIG_PPC32)
|
||||||
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
|
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
|
||||||
DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
|
DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
|
||||||
|
@ -653,8 +653,8 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
|
|||||||
kuap_check_amr r3, r4
|
kuap_check_amr r3, r4
|
||||||
ld r5,_MSR(r1)
|
ld r5,_MSR(r1)
|
||||||
andi. r0,r5,MSR_PR
|
andi. r0,r5,MSR_PR
|
||||||
bne .Lfast_user_interrupt_return
|
bne .Lfast_user_interrupt_return_amr
|
||||||
kuap_restore_amr r3, r4
|
kuap_kernel_restore r3, r4
|
||||||
andi. r0,r5,MSR_RI
|
andi. r0,r5,MSR_RI
|
||||||
li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
|
li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
|
||||||
bne+ .Lfast_kernel_interrupt_return
|
bne+ .Lfast_kernel_interrupt_return
|
||||||
@ -674,6 +674,8 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return)
|
|||||||
cmpdi r3,0
|
cmpdi r3,0
|
||||||
bne- .Lrestore_nvgprs
|
bne- .Lrestore_nvgprs
|
||||||
|
|
||||||
|
.Lfast_user_interrupt_return_amr:
|
||||||
|
kuap_user_restore r3
|
||||||
.Lfast_user_interrupt_return:
|
.Lfast_user_interrupt_return:
|
||||||
ld r11,_NIP(r1)
|
ld r11,_NIP(r1)
|
||||||
ld r12,_MSR(r1)
|
ld r12,_MSR(r1)
|
||||||
|
@ -1059,7 +1059,7 @@ EXC_COMMON_BEGIN(system_reset_common)
|
|||||||
ld r10,SOFTE(r1)
|
ld r10,SOFTE(r1)
|
||||||
stb r10,PACAIRQSOFTMASK(r13)
|
stb r10,PACAIRQSOFTMASK(r13)
|
||||||
|
|
||||||
kuap_restore_amr r9, r10
|
kuap_kernel_restore r9, r10
|
||||||
EXCEPTION_RESTORE_REGS
|
EXCEPTION_RESTORE_REGS
|
||||||
RFI_TO_USER_OR_KERNEL
|
RFI_TO_USER_OR_KERNEL
|
||||||
|
|
||||||
@ -2875,7 +2875,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
|
|||||||
ld r10,SOFTE(r1)
|
ld r10,SOFTE(r1)
|
||||||
stb r10,PACAIRQSOFTMASK(r13)
|
stb r10,PACAIRQSOFTMASK(r13)
|
||||||
|
|
||||||
kuap_restore_amr r9, r10
|
kuap_kernel_restore r9, r10
|
||||||
EXCEPTION_RESTORE_REGS hsrr=0
|
EXCEPTION_RESTORE_REGS hsrr=0
|
||||||
RFI_TO_KERNEL
|
RFI_TO_KERNEL
|
||||||
|
|
||||||
|
@ -35,7 +35,25 @@ notrace long system_call_exception(long r3, long r4, long r5,
|
|||||||
BUG_ON(!FULL_REGS(regs));
|
BUG_ON(!FULL_REGS(regs));
|
||||||
BUG_ON(regs->softe != IRQS_ENABLED);
|
BUG_ON(regs->softe != IRQS_ENABLED);
|
||||||
|
|
||||||
kuap_check_amr();
|
#ifdef CONFIG_PPC_PKEY
|
||||||
|
if (mmu_has_feature(MMU_FTR_PKEY)) {
|
||||||
|
unsigned long amr, iamr;
|
||||||
|
/*
|
||||||
|
* When entering from userspace we mostly have the AMR/IAMR
|
||||||
|
* different from kernel default values. Hence don't compare.
|
||||||
|
*/
|
||||||
|
amr = mfspr(SPRN_AMR);
|
||||||
|
iamr = mfspr(SPRN_IAMR);
|
||||||
|
regs->amr = amr;
|
||||||
|
regs->iamr = iamr;
|
||||||
|
if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
|
||||||
|
mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
|
||||||
|
if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP))
|
||||||
|
mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
|
||||||
|
isync();
|
||||||
|
} else
|
||||||
|
#endif
|
||||||
|
kuap_check_amr();
|
||||||
|
|
||||||
account_cpu_user_entry();
|
account_cpu_user_entry();
|
||||||
|
|
||||||
@ -245,6 +263,12 @@ again:
|
|||||||
|
|
||||||
account_cpu_user_exit();
|
account_cpu_user_exit();
|
||||||
|
|
||||||
|
#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
|
||||||
|
/*
|
||||||
|
* We do this at the end so that we do context switch with KERNEL AMR
|
||||||
|
*/
|
||||||
|
kuap_user_restore(regs);
|
||||||
|
#endif
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -330,6 +354,10 @@ again:
|
|||||||
|
|
||||||
account_cpu_user_exit();
|
account_cpu_user_exit();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We do this at the end so that we do context switch with KERNEL AMR
|
||||||
|
*/
|
||||||
|
kuap_user_restore(regs);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -400,7 +428,7 @@ again:
|
|||||||
* which would cause Read-After-Write stalls. Hence, we take the AMR
|
* which would cause Read-After-Write stalls. Hence, we take the AMR
|
||||||
* value from the check above.
|
* value from the check above.
|
||||||
*/
|
*/
|
||||||
kuap_restore_amr(regs, amr);
|
kuap_kernel_restore(regs, amr);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user