// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#define pr_fmt(fmt) "alternatives: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/sections.h>
#include <linux/stop_machine.h>
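/*
 * struct alt_instr stores its original and replacement sequences as
 * self-relative offsets; these helpers recover a pointer by adding the
 * offset to the address of the offset field itself.
 */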
#define __ALT_PTR(a, f) ((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)

/* Volatile, as we may be patching the guts of READ_ONCE() */
static volatile int all_alternatives_applied;
static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);
struct alt_region {
	struct alt_instr *begin;
	struct alt_instr *end;
};
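/*
 * Report whether the alternatives for @cpufeature have already been
 * applied, as tracked in the applied_alternatives bitmap above.
 */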
bool alternative_is_applied(u16 cpufeature)
{
	if (WARN_ON(cpufeature >= ARM64_NCAPS))
		return false;

	return test_bit(cpufeature, applied_alternatives);
}

/*
 * Check if the target PC is within an alternative block.
 */
static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
	unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);

	return !(pc >= replptr && pc <= (replptr + alt->alt_len));
}

#define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
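/*
 * Fetch one instruction from the replacement sequence, rewriting
 * PC-relative branches and adrp so that they remain correct when run
 * from the original (patched) location.
 */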
static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
	u32 insn;

	insn = le32_to_cpu(*altinsnptr);

	if (aarch64_insn_is_branch_imm(insn)) {
		s32 offset = aarch64_get_branch_offset(insn);
		unsigned long target;

		target = (unsigned long)altinsnptr + offset;

		/*
		 * If we're branching inside the alternate sequence,
		 * do not rewrite the instruction, as it is already
		 * correct. Otherwise, generate the new instruction.
		 */
		if (branch_insn_requires_update(alt, target)) {
			offset = target - (unsigned long)insnptr;
			insn = aarch64_set_branch_offset(insn, offset);
		}
	} else if (aarch64_insn_is_adrp(insn)) {
		s32 orig_offset, new_offset;
		unsigned long target;

		/*
		 * If we're replacing an adrp instruction, which uses PC-relative
		 * immediate addressing, adjust the offset to reflect the new
		 * PC. adrp operates on 4K aligned addresses.
		 */
		orig_offset = aarch64_insn_adrp_get_offset(insn);
		target = align_down(altinsnptr, SZ_4K) + orig_offset;
		new_offset = target - align_down(insnptr, SZ_4K);
		insn = aarch64_insn_adrp_set_offset(insn, new_offset);
	} else if (aarch64_insn_uses_literal(insn)) {
		/*
		 * Disallow patching unhandled instructions using PC relative
		 * literal addresses
		 */
		BUG();
	}

	return insn;
}

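/*
 * Copy nr_inst instructions from the replacement sequence into updptr,
 * fixing up PC-relative instructions via get_alt_insn(). Marked noinstr:
 * instrumentation could otherwise run partially patched code, as the
 * caches are only invalidated once all alternatives have been applied.
 */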
static noinstr void patch_alternative(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	__le32 *replptr;
	int i;

	replptr = ALT_REPL_PTR(alt);
	for (i = 0; i < nr_inst; i++) {
		u32 insn;

		insn = get_alt_insn(alt, origptr + i, replptr + i);
		updptr[i] = cpu_to_le32(insn);
	}
}

/*
 * We provide our own, private D-cache cleaning function so that we don't
 * accidentally call into the cache.S code, which is patched by us at
 * runtime.
 */
static void clean_dcache_range_nopatch(u64 start, u64 end)
{
	u64 cur, d_size, ctr_el0;

	ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
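	/*
	 * CTR_EL0.DminLine is the log2 of the number of 4-byte words in the
	 * smallest D-cache line, so "4 <<" converts it to a size in bytes.
	 */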
	d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
							   CTR_DMINLINE_SHIFT);
	cur = start & ~(d_size - 1);
	do {
		/*
		 * We must clean+invalidate to the PoC in order to avoid
		 * Cortex-A53 errata 826319, 827319, 824069 and 819472
		 * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
		 */
		asm volatile("dc civac, %0" : : "r" (cur) : "memory");
	} while (cur += d_size, cur < end);
}

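/*
 * Walk the given alt_region and patch every alternative whose capability
 * is selected in @feature_mask and either detected on the system or marked
 * as an unconditional (ARM64_CB_PATCH) patch, then perform the cache
 * maintenance needed to make the new instructions visible.
 */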
static void __nocfi __apply_alternatives(struct alt_region *region, bool is_module,
					 unsigned long *feature_mask)
{
	struct alt_instr *alt;
	__le32 *origptr, *updptr;
	alternative_cb_t alt_cb;

	for (alt = region->begin; alt < region->end; alt++) {
		int nr_inst;

		if (!test_bit(alt->cpufeature, feature_mask))
			continue;

		/* Use ARM64_CB_PATCH as an unconditional patch */
		if (alt->cpufeature < ARM64_CB_PATCH &&
		    !cpus_have_cap(alt->cpufeature))
			continue;

		if (alt->cpufeature == ARM64_CB_PATCH)
			BUG_ON(alt->alt_len != 0);
		else
			BUG_ON(alt->alt_len != alt->orig_len);

		pr_info_once("patching kernel code\n");

		origptr = ALT_ORIG_PTR(alt);
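		/*
		 * vmlinux text is mapped read-only, so it is patched through
		 * its writable linear-map alias; module text lies outside the
		 * linear map and is written in place.
		 */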
		updptr = is_module ? origptr : lm_alias(origptr);
		nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
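
		/*
		 * Regular alternatives are patched by copying instructions
		 * from the replacement sequence; callback alternatives
		 * (cpufeature >= ARM64_CB_PATCH) instead store a callback
		 * that generates the instructions itself.
		 */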
		if (alt->cpufeature < ARM64_CB_PATCH)
			alt_cb = patch_alternative;
		else
			alt_cb = ALT_REPL_PTR(alt);

		alt_cb(alt, origptr, updptr, nr_inst);

		if (!is_module) {
			clean_dcache_range_nopatch((u64)origptr,
						   (u64)(origptr + nr_inst));
		}
	}

	/*
	 * The core module code takes care of cache maintenance in
	 * flush_module_icache().
	 */
	if (!is_module) {
		dsb(ish);
		icache_inval_all_pou();
		isb();

		/* Ignore ARM64_CB bit from feature mask */
		bitmap_or(applied_alternatives, applied_alternatives,
			  feature_mask, ARM64_NCAPS);
		bitmap_and(applied_alternatives, applied_alternatives,
			   cpu_hwcaps, ARM64_NCAPS);
	}
}

/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __apply_alternatives_multi_stop(void *unused)
{
	struct alt_region region = {
		.begin = (struct alt_instr *)__alt_instructions,
		.end = (struct alt_instr *)__alt_instructions_end,
	};

	/* We always have a CPU 0 at this point (__init) */
	if (smp_processor_id()) {
		while (!all_alternatives_applied)
			cpu_relax();
		isb();
	} else {
		DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);

		bitmap_complement(remaining_capabilities, boot_capabilities,
				  ARM64_NPATCHABLE);

		BUG_ON(all_alternatives_applied);
		__apply_alternatives(&region, false, remaining_capabilities);
		/* Barriers provided by the cache flushing */
		all_alternatives_applied = 1;
	}

	return 0;
}
void __init apply_alternatives_all(void)
{
	/* better not try code patching on a live SMP system */
	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}

/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
void __init apply_boot_alternatives(void)
{
	struct alt_region region = {
		.begin = (struct alt_instr *)__alt_instructions,
		.end = (struct alt_instr *)__alt_instructions_end,
	};

	/* If called on non-boot cpu things could go wrong */
	WARN_ON(smp_processor_id() != 0);

	__apply_alternatives(&region, false, &boot_capabilities[0]);
}

#ifdef CONFIG_MODULES
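/*
 * Module text keeps no record of which alternatives were applied, so offer
 * every patchable capability here; __apply_alternatives() still skips the
 * ones the CPUs don't have.
 */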
void apply_alternatives_module(void *start, size_t length)
{
	struct alt_region region = {
		.begin = start,
		.end = start + length,
	};
	DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);

	bitmap_fill(all_capabilities, ARM64_NPATCHABLE);

	__apply_alternatives(&region, true, &all_capabilities[0]);
}
#endif