
LoongArch fixes for v6.6-rc3

-----BEGIN PGP SIGNATURE-----
 
 iQJKBAABCAA0FiEEzOlt8mkP+tbeiYy5AoYrw/LiJnoFAmUKki4WHGNoZW5odWFj
 YWlAa2VybmVsLm9yZwAKCRAChivD8uImepsVEACcMAw3/Gg3ldIDlV6mWSYGn6kA
 eF2Cc89q4C53CYYlYHalBqVdOObonR0g4roz385UjlGXeVtOuYzKB2DMy8GE3V7s
 63Q82jpkGtgpJ9/md+2FnOoaT6CiN+kbcwdbSmEsz+9yht9IzRlO5R0urH92jwsU
 wpnFzGtn1kHgGv+yC8XQDvk5ZvYiiA9bWrXiaLl+aEF0qeQBhgI+f7+Jew/VWBNR
 ykH0TcOp0cjt7AqYlOHb3YXqwIO6U5sVLIfrHzCxKkrfeV/DE8J0FU3/YQ/okMr7
 tjBJxS4o1UsNyT+9ItXjqYClOAy1IaW+2UmC8r2k79hZKEyicHu3/o7xpBCvoQoa
 9OAKFAtO1UyX3h3uUynouaSXCuQ48GAetnkGMFuhuUVlF9Aq9OdA6lAWeuolkace
 VYs3djjkAvsWq6HH2tm5lpcq8jXsbc2QRbHl+f4BGgyoXtEk7NXsqfPcvJeFDMFF
 /PKYFQnPWebv4LoqxSNjN7S7S23N0k9tH+lITX8WvMJzRQaUTt4S19e7YHotNMty
 UXDBIW6mjVIOT11zzNcsEzkMXA/8Q4VvZbQy67nfweg8KMLMChBdkRphK+4pOLN/
 0Pvge3SAAVI/cdNWOxwqzvHvQbqVsjb4p4GmghPSLOojFPKW47ueWm8xeq/Hd05r
 ssZZGOC8/H1AqDvOIw==
 =EKBZ
 -----END PGP SIGNATURE-----

Merge tag 'loongarch-fixes-6.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:
 "Fix lockdep, fix a boot failure, fix some build warnings, fix document
  links, and some cleanups"

* tag 'loongarch-fixes-6.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  docs/zh_CN/LoongArch: Update the links of ABI
  docs/LoongArch: Update the links of ABI
  LoongArch: Don't inline kasan_mem_to_shadow()/kasan_shadow_to_mem()
  kasan: Cleanup the __HAVE_ARCH_SHADOW_MAP usage
  LoongArch: Set all reserved memblocks on Node#0 at initialization
  LoongArch: Remove dead code in relocate_new_kernel
  LoongArch: Use _UL() and _ULL()
  LoongArch: Fix some build warnings with W=1
  LoongArch: Fix lockdep static memory detection
Linus Torvalds 2023-09-23 10:57:03 -07:00
commit 93397d3a2f
26 changed files with 178 additions and 135 deletions


@ -381,9 +381,9 @@ Documentation of LoongArch ISA:
Documentation of LoongArch ELF psABI:
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-CN.pdf (in Chinese)
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-CN.pdf (in Chinese)
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-EN.pdf (in English)
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-EN.pdf (in English)
Linux kernel repository of Loongson and LoongArch:


@ -344,9 +344,9 @@ LoongArch指令集架构的文档
LoongArch的ELF psABI文档
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-CN.pdf (中文版)
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-CN.pdf (中文版)
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-EN.pdf (英文版)
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-EN.pdf (英文版)
Loongson与LoongArch的Linux内核源码仓库


@ -19,7 +19,7 @@
*/
#ifndef __ASSEMBLY__
#ifndef PHYS_OFFSET
#define PHYS_OFFSET _AC(0, UL)
#define PHYS_OFFSET _UL(0)
#endif
extern unsigned long vm_map_base;
#endif /* __ASSEMBLY__ */
@ -43,7 +43,7 @@ extern unsigned long vm_map_base;
* Memory above this physical address will be considered highmem.
*/
#ifndef HIGHMEM_START
#define HIGHMEM_START (_AC(1, UL) << _AC(DMW_PABITS, UL))
#define HIGHMEM_START (_UL(1) << _UL(DMW_PABITS))
#endif
#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
@ -65,16 +65,16 @@ extern unsigned long vm_map_base;
#define _ATYPE_
#define _ATYPE32_
#define _ATYPE64_
#define _CONST64_(x) x
#else
#define _ATYPE_ __PTRDIFF_TYPE__
#define _ATYPE32_ int
#define _ATYPE64_ __s64
#ifdef CONFIG_64BIT
#define _CONST64_(x) x ## UL
#else
#define _CONST64_(x) x ## ULL
#endif
#ifdef CONFIG_64BIT
#define _CONST64_(x) _UL(x)
#else
#define _CONST64_(x) _ULL(x)
#endif
/*

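For reference (not part of this patch): the _UL() and _ULL() helpers adopted above are the generic ones from include/uapi/linux/const.h. A simplified sketch of their definitions:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: no integer suffix */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C: paste the UL/ULL suffix */
#endif

#define _UL(x)		(_AC(x, UL))
#define _ULL(x)		(_AC(x, ULL))

So _UL(0) and _AC(0, UL) expand identically (0UL in C, 0 in assembly); the change is a readability cleanup, not a behavioural one.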

@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_EXCEPTION_H
#define __ASM_EXCEPTION_H
#include <asm/ptrace.h>
#include <linux/kprobes.h>
void show_registers(struct pt_regs *regs);
asmlinkage void cache_parity_error(void);
asmlinkage void noinstr do_ade(struct pt_regs *regs);
asmlinkage void noinstr do_ale(struct pt_regs *regs);
asmlinkage void noinstr do_bce(struct pt_regs *regs);
asmlinkage void noinstr do_bp(struct pt_regs *regs);
asmlinkage void noinstr do_ri(struct pt_regs *regs);
asmlinkage void noinstr do_fpu(struct pt_regs *regs);
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr);
asmlinkage void noinstr do_lsx(struct pt_regs *regs);
asmlinkage void noinstr do_lasx(struct pt_regs *regs);
asmlinkage void noinstr do_lbt(struct pt_regs *regs);
asmlinkage void noinstr do_watch(struct pt_regs *regs);
asmlinkage void noinstr do_syscall(struct pt_regs *regs);
asmlinkage void noinstr do_reserved(struct pt_regs *regs);
asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp);
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
unsigned long write, unsigned long address);
asmlinkage void handle_ade(void);
asmlinkage void handle_ale(void);
asmlinkage void handle_bce(void);
asmlinkage void handle_sys(void);
asmlinkage void handle_bp(void);
asmlinkage void handle_ri(void);
asmlinkage void handle_fpu(void);
asmlinkage void handle_fpe(void);
asmlinkage void handle_lsx(void);
asmlinkage void handle_lasx(void);
asmlinkage void handle_lbt(void);
asmlinkage void handle_watch(void);
asmlinkage void handle_reserved(void);
asmlinkage void handle_vint(void);
asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs);
#endif /* __ASM_EXCEPTION_H */

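The new asm/exception.h above collects the exception and handler prototypes that were previously re-declared as local externs (those declarations are removed from traps.c later in this diff). A minimal sketch of the resulting pattern, as an illustration rather than actual kernel source:

#include <asm/exception.h>	/* declares do_bp(), handle_bp(), ... */

/* The definition now has a prototype in scope, so a W=1 build
 * (-Wmissing-prototypes) stops warning about it, and other files no
 * longer need their own "extern asmlinkage void ..." lines. */
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
	/* ... handler body ... */
}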

@ -10,8 +10,6 @@
#include <asm/io.h>
#include <asm/pgtable.h>
#define __HAVE_ARCH_SHADOW_MAP
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
@ -62,61 +60,22 @@
extern bool kasan_early_stage;
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
#define kasan_mem_to_shadow kasan_mem_to_shadow
void *kasan_mem_to_shadow(const void *addr);
#define kasan_shadow_to_mem kasan_shadow_to_mem
const void *kasan_shadow_to_mem(const void *shadow_addr);
#define kasan_arch_is_ready kasan_arch_is_ready
static __always_inline bool kasan_arch_is_ready(void)
{
return !kasan_early_stage;
}
static inline void *kasan_mem_to_shadow(const void *addr)
#define addr_has_metadata addr_has_metadata
static __always_inline bool addr_has_metadata(const void *addr)
{
if (!kasan_arch_is_ready()) {
return (void *)(kasan_early_shadow_page);
} else {
unsigned long maddr = (unsigned long)addr;
unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
unsigned long offset = 0;
maddr &= XRANGE_SHADOW_MASK;
switch (xrange) {
case XKPRANGE_CC_SEG:
offset = XKPRANGE_CC_SHADOW_OFFSET;
break;
case XKPRANGE_UC_SEG:
offset = XKPRANGE_UC_SHADOW_OFFSET;
break;
case XKVRANGE_VC_SEG:
offset = XKVRANGE_VC_SHADOW_OFFSET;
break;
default:
WARN_ON(1);
return NULL;
}
return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
}
}
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
unsigned long addr = (unsigned long)shadow_addr;
if (unlikely(addr > KASAN_SHADOW_END) ||
unlikely(addr < KASAN_SHADOW_START)) {
WARN_ON(1);
return NULL;
}
if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
else {
WARN_ON(1);
return NULL;
}
return (kasan_mem_to_shadow((void *)addr) != NULL);
}
void kasan_init(void);


@ -70,6 +70,7 @@ struct secondary_data {
extern struct secondary_data cpuboot_data;
extern asmlinkage void smpboot_entry(void);
extern asmlinkage void start_secondary(void);
extern void calculate_cpu_foreign_map(void);


@ -19,6 +19,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
ifdef CONFIG_FUNCTION_TRACER
ifndef CONFIG_DYNAMIC_FTRACE
obj-y += mcount.o ftrace.o


@ -281,7 +281,6 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}
void __init acpi_numa_arch_fixup(void) {}
#endif
void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)


@ -50,7 +50,6 @@ void __init memblock_init(void)
}
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
/* Reserve the first 2MB */
memblock_reserve(PHYS_OFFSET, 0x200000);
@ -58,4 +57,7 @@ void __init memblock_init(void)
/* Reserve the kernel text/data/bss */
memblock_reserve(__pa_symbol(&_text),
__pa_symbol(&_end) - __pa_symbol(&_text));
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.reserved, 0);
}

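For reference (not part of the patch), the call used above is declared in include/linux/memblock.h:

int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

It tags every region of @type intersecting [base, base + size) with NUMA node @nid, so calling it for both memblock.memory and memblock.reserved after the reservations leaves no reserved region without a node id.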

@ -6,6 +6,7 @@
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/ftrace.h>
Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)


@ -37,6 +37,7 @@
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/io.h>


@ -72,7 +72,6 @@ copy_word:
LONG_ADDI s5, s5, -1
beqz s5, process_entry
b copy_word
b process_entry
done:
ibar 0


@ -13,6 +13,7 @@
#include <linux/audit.h>
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
@ -891,8 +892,8 @@ static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned lon
return new_sp;
}
void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
struct extctx_layout *extctx)
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
struct extctx_layout *extctx)
{
unsigned long sp;
@ -922,7 +923,7 @@ void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
* Atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage long sys_rt_sigreturn(void)
SYSCALL_DEFINE0(rt_sigreturn)
{
int sig;
sigset_t set;

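Converting sys_rt_sigreturn() to SYSCALL_DEFINE0(rt_sigreturn) keeps the sys_rt_sigreturn definition but also emits a forward declaration (plus the usual syscall metadata), which is presumably what quiets the W=1 missing-prototype warning here. A simplified sketch of the generic macro from include/linux/syscalls.h, with tracing metadata and error-injection hooks omitted (architectures may override it):

#ifndef SYSCALL_DEFINE0
#define SYSCALL_DEFINE0(sname)			\
	asmlinkage long sys_##sname(void);	\
	asmlinkage long sys_##sname(void)
#endif

/* SYSCALL_DEFINE0(rt_sigreturn) thus expands to a declaration followed by
 * the definition of sys_rt_sigreturn(void). */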

@ -13,6 +13,7 @@
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
@ -556,10 +557,12 @@ void smp_send_stop(void)
smp_call_function(stop_this_cpu, NULL, 0);
}
#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
#endif
static void flush_tlb_all_ipi(void *info)
{


@ -13,6 +13,7 @@
#include <linux/unistd.h>
#include <asm/asm.h>
#include <asm/exception.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm-generic/syscalls.h>


@ -29,7 +29,7 @@ static void constant_event_handler(struct clock_event_device *dev)
{
}
irqreturn_t constant_timer_interrupt(int irq, void *data)
static irqreturn_t constant_timer_interrupt(int irq, void *data)
{
int cpu = smp_processor_id();
struct clock_event_device *cd;


@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
@ -7,6 +8,8 @@
#include <linux/percpu.h>
#include <asm/bootinfo.h>
#include <acpi/processor.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#ifdef CONFIG_HOTPLUG_CPU


@ -25,7 +25,6 @@
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
@ -35,6 +34,7 @@
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/exception.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/inst.h>
@ -53,21 +53,6 @@
#include "access-helper.h"
extern asmlinkage void handle_ade(void);
extern asmlinkage void handle_ale(void);
extern asmlinkage void handle_bce(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_fpu(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_lbt(void);
extern asmlinkage void handle_lsx(void);
extern asmlinkage void handle_lasx(void);
extern asmlinkage void handle_reserved(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_vint(void);
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
const char *loglvl, bool user)
{
@ -439,8 +424,8 @@ static inline void setup_vint_size(unsigned int size)
* happen together with Overflow or Underflow, and `ptrace' can set
* any bits.
*/
void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
struct task_struct *tsk)
static void force_fcsr_sig(unsigned long fcsr,
void __user *fault_addr, struct task_struct *tsk)
{
int si_code = FPE_FLTUNK;
@ -458,7 +443,7 @@ void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
force_sig_fault(SIGFPE, si_code, fault_addr);
}
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
int si_code;
@ -824,7 +809,7 @@ out:
asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
int status = SIGILL;
unsigned int opcode = 0;
unsigned int __maybe_unused opcode;
unsigned int __user *era = (unsigned int __user *)exception_era(regs);
irqentry_state_t state = irqentry_enter(regs);

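In the do_ri() hunk above, the dummy "opcode = 0" initialisation is replaced with the __maybe_unused attribute. For reference, include/linux/compiler_attributes.h defines it essentially as:

#define __maybe_unused	__attribute__((__unused__))

which tells the compiler not to warn if the variable is never read, without pretending the zero value is meaningful.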

@ -53,33 +53,6 @@ SECTIONS
. = ALIGN(PECOFF_SEGMENT_ALIGN);
_etext = .;
/*
* struct alt_inst entries. From the header (alternative.h):
* "Alternative instructions for different CPU types or capabilities"
* Think locking instructions on spinlocks.
*/
. = ALIGN(4);
.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
__alt_instructions = .;
*(.altinstructions)
__alt_instructions_end = .;
}
#ifdef CONFIG_RELOCATABLE
. = ALIGN(8);
.la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
__la_abs_begin = .;
*(.la_abs)
__la_abs_end = .;
}
#endif
.got : ALIGN(16) { *(.got) }
.plt : ALIGN(16) { *(.plt) }
.got.plt : ALIGN(16) { *(.got.plt) }
.data.rel : { *(.data.rel*) }
. = ALIGN(PECOFF_SEGMENT_ALIGN);
__init_begin = .;
__inittext_begin = .;
@ -94,6 +67,18 @@ SECTIONS
__initdata_begin = .;
/*
* struct alt_inst entries. From the header (alternative.h):
* "Alternative instructions for different CPU types or capabilities"
* Think locking instructions on spinlocks.
*/
. = ALIGN(4);
.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
__alt_instructions = .;
*(.altinstructions)
__alt_instructions_end = .;
}
INIT_DATA_SECTION(16)
.exit.data : {
EXIT_DATA
@ -113,6 +98,11 @@ SECTIONS
_sdata = .;
RO_DATA(4096)
.got : ALIGN(16) { *(.got) }
.plt : ALIGN(16) { *(.plt) }
.got.plt : ALIGN(16) { *(.got.plt) }
RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE)
.rela.dyn : ALIGN(8) {
@ -121,6 +111,17 @@ SECTIONS
__rela_dyn_end = .;
}
.data.rel : { *(.data.rel*) }
#ifdef CONFIG_RELOCATABLE
. = ALIGN(8);
.la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
__la_abs_begin = .;
*(.la_abs)
__la_abs_end = .;
}
#endif
.sdata : {
*(.sdata)
}


@ -20,12 +20,12 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/kfence.h>
#include <asm/branch.h>
#include <asm/exception.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>


@ -50,18 +50,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
return (pte_t *) pmd;
}
/*
* This function checks for proper alignment of input addr and len parameters.
*/
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
return -EINVAL;
return 0;
}
int pmd_huge(pmd_t pmd)
{
return (pmd_val(pmd) & _PAGE_HUGE) != 0;


@ -4,6 +4,7 @@
*/
#include <asm/io.h>
#include <asm-generic/early_ioremap.h>
void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size)
{


@ -35,6 +35,57 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
bool kasan_early_stage = true;
void *kasan_mem_to_shadow(const void *addr)
{
if (!kasan_arch_is_ready()) {
return (void *)(kasan_early_shadow_page);
} else {
unsigned long maddr = (unsigned long)addr;
unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
unsigned long offset = 0;
maddr &= XRANGE_SHADOW_MASK;
switch (xrange) {
case XKPRANGE_CC_SEG:
offset = XKPRANGE_CC_SHADOW_OFFSET;
break;
case XKPRANGE_UC_SEG:
offset = XKPRANGE_UC_SHADOW_OFFSET;
break;
case XKVRANGE_VC_SEG:
offset = XKVRANGE_VC_SHADOW_OFFSET;
break;
default:
WARN_ON(1);
return NULL;
}
return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
}
}
const void *kasan_shadow_to_mem(const void *shadow_addr)
{
unsigned long addr = (unsigned long)shadow_addr;
if (unlikely(addr > KASAN_SHADOW_END) ||
unlikely(addr < KASAN_SHADOW_START)) {
WARN_ON(1);
return NULL;
}
if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
else {
WARN_ON(1);
return NULL;
}
}
/*
* Alloc memory for shadow memory page table.
*/


@ -261,7 +261,7 @@ unsigned long pcpu_handlers[NR_CPUS];
#endif
extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
void setup_tlb_handler(int cpu)
static void setup_tlb_handler(int cpu)
{
setup_ptwalker();
local_flush_tlb_all();


@ -54,7 +54,7 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
#ifndef __HAVE_ARCH_SHADOW_MAP
#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)


@ -291,7 +291,7 @@ struct kasan_stack_ring {
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#ifndef __HAVE_ARCH_SHADOW_MAP
#ifndef kasan_shadow_to_mem
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
@ -299,15 +299,13 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
}
#endif
#ifndef addr_has_metadata
static __always_inline bool addr_has_metadata(const void *addr)
{
#ifdef __HAVE_ARCH_SHADOW_MAP
return (kasan_mem_to_shadow((void *)addr) != NULL);
#else
return (kasan_reset_tag(addr) >=
kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
#endif
}
#endif
/**
* kasan_check_range - Check memory region, and report if invalid access.
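Taken together, the kasan hunks in this series drop the __HAVE_ARCH_SHADOW_MAP switch in favour of the kernel's usual override convention: the architecture defines the helper out of line plus a macro with the same name, and the generic fallback is guarded by #ifndef on that name. A condensed sketch pieced together from the hunks above (not a literal copy of either file):

/* LoongArch kasan header: out-of-line helpers plus same-named macros
 * so that generic code can detect the override. */
#define kasan_mem_to_shadow kasan_mem_to_shadow
void *kasan_mem_to_shadow(const void *addr);

#define kasan_shadow_to_mem kasan_shadow_to_mem
const void *kasan_shadow_to_mem(const void *shadow_addr);

#define addr_has_metadata addr_has_metadata
static __always_inline bool addr_has_metadata(const void *addr)
{
	return (kasan_mem_to_shadow((void *)addr) != NULL);
}

/* Generic kasan headers: the fallbacks now compile out when the
 * architecture provides its own versions. */
#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif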