// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 *  Modifications for ppc64:
 *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 *  Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/security_features.h>
#include <asm/firmware.h>
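
/*
 * One fixup_entry is emitted for each feature section, e.g. asm such as:
 *
 *	BEGIN_FTR_SECTION
 *	or	1,1,1
 *	FTR_SECTION_ELSE
 *	or	2,2,2
 *	ALT_FTR_SECTION_END_IFSET(FOO)
 *
 * The start/end and alt_start/alt_end offsets are stored relative to the
 * entry itself (see calc_addr()).
 */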
struct fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	long		start_off;
	long		end_off;
	long		alt_start_off;
	long		alt_end_off;
};

static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
{
	/*
	 * We store the offset to the code as a negative offset from
	 * the start of the alt_entry, to support the VDSO. This
	 * routine converts that back into an actual address.
	 */
	return (unsigned int *)((unsigned long)fcur + offset);
}
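
/*
 * Copy one instruction from the alternative section into place, translating
 * relative branches that target code outside the alternative section so they
 * still reach the same destination from their new location.
 */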
static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
				 unsigned int *alt_start, unsigned int *alt_end)
{
	int err;
	unsigned int instr;

	instr = *src;

	if (instr_is_relative_branch(*src)) {
		unsigned int *target = (unsigned int *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target > alt_end) {
			err = translate_branch(&instr, dest, src);
			if (err)
				return 1;
		}
	}

	raw_patch_instruction(dest, instr);

	return 0;
}
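
/*
 * Apply one fixup entry: if the feature value doesn't match the entry, copy
 * the alternative instructions over the default section and NOP out any
 * remainder. Returns non-zero if the alternative is too big to fit.
 */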
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	if ((alt_end - alt_start) > (end - start))
		return 1;

	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	for (; src < alt_end; src++, dest++) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	for (; dest < end; dest++)
		raw_patch_instruction(dest, PPC_INST_NOP);

	return 0;
}
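
/*
 * Walk a table of fixup entries and apply each one against the given feature
 * word, warning if any entry cannot be patched.
 */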
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur, *fend;

	fcur = fixup_start;
	fend = fixup_end;

	for (; fcur < fend; fcur++) {
		if (patch_feature_section(value, fcur)) {
			WARN_ON(1);
			printk("Unable to patch feature section at %p - %p" \
				" with %p - %p\n",
				calc_addr(fcur, fcur->start_off),
				calc_addr(fcur, fcur->end_off),
				calc_addr(fcur, fcur->alt_start_off),
				calc_addr(fcur, fcur->alt_end_off));
		}
	}
}

#ifdef CONFIG_PPC_BOOK3S_64
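/*
 * Patch the STF (store forwarding) barrier entry sites with the requested
 * barrier sequence: a branch to the fallback flush, an eieio, or a
 * sync/load/ori sequence.
 */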
static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_entry_barrier_fixup),
	end = PTRRELOC(&__stop___stf_entry_barrier_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	i = 0;
	if (types & STF_BARRIER_FALLBACK) {
		instrs[i++] = 0x7d4802a6; /* mflr r10 */
		instrs[i++] = 0x60000000; /* branch patched below */
		instrs[i++] = 0x7d4803a6; /* mtlr r10 */
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
	} else if (types & STF_BARRIER_SYNC_ORI) {
		instrs[i++] = 0x7c0004ac; /* hwsync */
		instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);

		if (types & STF_BARRIER_FALLBACK)
			patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
				     BRANCH_SET_LINK);
		else
			patch_instruction(dest + 1, instrs[1]);

		patch_instruction(dest + 2, instrs[2]);
	}

	printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)     ? "no"       :
		(types == STF_BARRIER_FALLBACK) ? "fallback" :
		(types == STF_BARRIER_EIEIO)    ? "eieio"    :
		(types == STF_BARRIER_SYNC_ORI) ? "hwsync"
						: "unknown");
}

static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[6], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_exit_barrier_fixup),
	end = PTRRELOC(&__stop___stf_exit_barrier_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */
	instrs[3] = 0x60000000; /* nop */
	instrs[4] = 0x60000000; /* nop */
	instrs[5] = 0x60000000; /* nop */

	i = 0;
	if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
			instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
		} else {
			instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
			instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
		}
		instrs[i++] = 0x7c0004ac; /* hwsync */
		instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
		} else {
			instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
		}
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);
		patch_instruction(dest + 1, instrs[1]);
		patch_instruction(dest + 2, instrs[2]);
		patch_instruction(dest + 3, instrs[3]);
		patch_instruction(dest + 4, instrs[4]);
		patch_instruction(dest + 5, instrs[5]);
	}

	printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)     ? "no"       :
		(types == STF_BARRIER_FALLBACK) ? "fallback" :
		(types == STF_BARRIER_EIEIO)    ? "eieio"    :
		(types == STF_BARRIER_SYNC_ORI) ? "hwsync"
						: "unknown");
}
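
/* Patch both the kernel entry and kernel exit STF barrier sites. */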
void do_stf_barrier_fixups(enum stf_barrier_type types)
{
	do_stf_entry_barrier_fixups(types);
	do_stf_exit_barrier_fixups(types);
}
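
/*
 * Patch the nop sites at each (h)rfi that returns to a lower privilege level
 * with the selected L1-D cache flush sequence: a branch to the displacement
 * flush fallback, the ori flush nops, or an mtspr to the TRIG2 SPR.
 */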
void do_rfi_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___rfi_flush_fixup),
	end = PTRRELOC(&__stop___rfi_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	if (types & L1D_FLUSH_FALLBACK)
		/* b .+16 to fallback flush */
		instrs[0] = 0x48000010;

	i = 0;
	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);
		patch_instruction(dest + 1, instrs[1]);
		patch_instruction(dest + 2, instrs[2]);
	}

	printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)     ? "no" :
		(types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)      ? (types & L1D_FLUSH_MTTRIG)
						? "ori+mttrig type"
						: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)   ? "mttrig type"
					      : "unknown");
}
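
/*
 * Patch each barrier_nospec site with an ori 31,31,0 speculation barrier when
 * the mitigation is enabled, or with a plain nop when it is not.
 */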
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr, *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr = 0x60000000; /* nop */

	if (enable) {
		pr_info("barrier-nospec: using ORI speculation barrier\n");
		instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction(dest, instr);
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}

#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_BARRIER_NOSPEC
void do_barrier_nospec_fixups(bool enable)
{
	void *start, *end;

	start = PTRRELOC(&__start___barrier_nospec_fixup),
	end = PTRRELOC(&__stop___barrier_nospec_fixup);

	do_barrier_nospec_fixups_range(enable, start, end);
}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_FSL_BOOK3E
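/*
 * On Freescale Book3E CPUs the speculation barrier is an isync;sync sequence,
 * so each barrier_nospec site here is two patched instructions (or two nops
 * when the mitigation is disabled).
 */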
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr[2], *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr[0] = PPC_INST_NOP;
	instr[1] = PPC_INST_NOP;

	if (enable) {
		pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
		instr[0] = PPC_INST_ISYNC;
		instr[1] = PPC_INST_SYNC;
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction(dest, instr[0]);
		patch_instruction(dest + 1, instr[1]);
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}
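
/*
 * NOP out one branch target buffer (BTB) flush sequence, described by a pair
 * of offsets in the __btb_flush_fixup table.
 */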
static void patch_btb_flush_section(long *curr)
{
	unsigned int *start, *end;

	start = (void *)curr + *curr;
	end = (void *)curr + *(curr + 1);
	for (; start < end; start++) {
		pr_devel("patching dest %lx\n", (unsigned long)start);
		patch_instruction(start, PPC_INST_NOP);
	}
}

void do_btb_flush_fixups(void)
{
	long *start, *end;

	start = PTRRELOC(&__start__btb_flush_fixup);
	end = PTRRELOC(&__stop__btb_flush_fixup);

	for (; start < end; start += 2)
		patch_btb_flush_section(start);
}
#endif /* CONFIG_PPC_FSL_BOOK3E */
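
/*
 * On CPUs with CPU_FTR_LWSYNC, replace the recorded full sync instructions
 * with the lighter-weight lwsync.
 */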
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	long *start, *end;
	unsigned int *dest;

	if (!(value & CPU_FTR_LWSYNC))
		return;

	start = fixup_start;
	end = fixup_end;

	for (; start < end; start++) {
		dest = (void *)start + *start;
		raw_patch_instruction(dest, PPC_INST_LWSYNC);
	}
}
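
/*
 * With a relocatable kernel running above physical address zero, the copy of
 * the exception vector text at the bottom of memory must match the patched
 * kernel text, so copy it down once all other fixups are done.
 */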
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	int *src, *dest;
	unsigned long length;

	if (PHYSICAL_START == 0)
		return;

	src = (int *)(KERNELBASE + PHYSICAL_START);
	dest = (int *)KERNELBASE;
	length = (__end_interrupts - _stext) / sizeof(int);

	while (length--) {
		raw_patch_instruction(dest, *src);
		src++;
		dest++;
	}
#endif
}

static unsigned long __initdata saved_cpu_features;
static unsigned int __initdata saved_mmu_features;
#ifdef CONFIG_PPC64
static unsigned long __initdata saved_firmware_features;
#endif

void __init apply_feature_fixups(void)
{
	struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));

	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;

	/*
	 * Apply the CPU-specific and firmware specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
	saved_firmware_features = powerpc_firmware_features;
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
	do_final_fixups();
}

void __init setup_feature_keys(void)
{
	/*
	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
	 * checks to take on their correct polarity based on the current set of
	 * CPU/MMU features.
	 */
	jump_label_init();
	cpu_feature_keys_init();
	mmu_feature_keys_init();
}

static int __init check_features(void)
{
	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
	     "CPU features changed after feature patching!\n");
	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
	     "MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
	WARN(saved_firmware_features != powerpc_firmware_features,
	     "Firmware features changed after feature patching!\n");
#endif

	return 0;
}
late_initcall(check_features);

#ifdef CONFIG_FTR_FIXUP_SELFTEST

#define check(x)	\
	if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);

/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;

static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
	return (unsigned long)p - (unsigned long)entry;
}

static void test_basic_patching(void)
{
	extern unsigned int ftr_fixup_test1[];
	extern unsigned int end_ftr_fixup_test1[];
	extern unsigned int ftr_fixup_test1_orig[];
	extern unsigned int ftr_fixup_test1_expected[];
	int size = 4 * (end_ftr_fixup_test1 - ftr_fixup_test1);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}

static void test_alternative_patching(void)
{
	extern unsigned int ftr_fixup_test2[];
	extern unsigned int end_ftr_fixup_test2[];
	extern unsigned int ftr_fixup_test2_orig[];
	extern unsigned int ftr_fixup_test2_alt[];
	extern unsigned int ftr_fixup_test2_expected[];
	int size = 4 * (end_ftr_fixup_test2 - ftr_fixup_test2);

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}

static void test_alternative_case_too_big(void)
{
	extern unsigned int ftr_fixup_test3[];
	extern unsigned int end_ftr_fixup_test3[];
	extern unsigned int ftr_fixup_test3_orig[];
	extern unsigned int ftr_fixup_test3_alt[];
	int size = 4 * (end_ftr_fixup_test3 - ftr_fixup_test3);

	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}

static void test_alternative_case_too_small(void)
{
	extern unsigned int ftr_fixup_test4[];
	extern unsigned int end_ftr_fixup_test4[];
	extern unsigned int ftr_fixup_test4_orig[];
	extern unsigned int ftr_fixup_test4_alt[];
	extern unsigned int ftr_fixup_test4_expected[];
	int size = 4 * (end_ftr_fixup_test4 - ftr_fixup_test4);
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
}

static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5[];
	extern unsigned int end_ftr_fixup_test5[];
	extern unsigned int ftr_fixup_test5_expected[];
	int size = 4 * (end_ftr_fixup_test5 - ftr_fixup_test5);

	check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
}

static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6[];
	extern unsigned int end_ftr_fixup_test6[];
	extern unsigned int ftr_fixup_test6_expected[];
	int size = 4 * (end_ftr_fixup_test6 - ftr_fixup_test6);

	check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
}

static void test_alternative_case_with_branch_to_end(void)
{
	extern unsigned int ftr_fixup_test7[];
	extern unsigned int end_ftr_fixup_test7[];
	extern unsigned int ftr_fixup_test7_expected[];
	int size = 4 * (end_ftr_fixup_test7 - ftr_fixup_test7);

	check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0);
}

static void test_cpu_macros(void)
{
	extern u8 ftr_fixup_test_FTR_macros[];
	extern u8 ftr_fixup_test_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FTR_macros_expected -
			     ftr_fixup_test_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FTR_macros,
		     ftr_fixup_test_FTR_macros_expected, size) == 0);
}

static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros[];
	extern u8 ftr_fixup_test_FW_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FW_FTR_macros_expected -
			     ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FW_FTR_macros,
		     ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}

static void test_lwsync_macros(void)
{
	extern u8 lwsync_fixup_test[];
	extern u8 end_lwsync_fixup_test[];
	extern u8 lwsync_fixup_test_expected_LWSYNC[];
	extern u8 lwsync_fixup_test_expected_SYNC[];
	unsigned long size = end_lwsync_fixup_test -
			     lwsync_fixup_test;

	/* The fixups have already been done for us during boot */
	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_LWSYNC, size) == 0);
	} else {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_SYNC, size) == 0);
	}
}

static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_alternative_case_with_branch_to_end();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();

	return 0;
}
late_initcall(test_feature_fixups);

#endif /* CONFIG_FTR_FIXUP_SELFTEST */