linux-next/arch/x86/include/asm/setup.h
Mike Rapoport 88107d330d x86/mm: simplify init_trampoline() and surrounding logic
There are three cases for the trampoline initialization:
* 32-bit does nothing
* 64-bit with kaslr disabled simply copies a PGD entry from the direct map
  to the trampoline PGD
* 64-bit with kaslr enabled maps the real mode trampoline at PUD level

These cases are currently differentiated by a bunch of ifdefs inside
arch/x86/include/asm/pgtable.h, and the 64-bit case with kaslr enabled uses
the pgd_index() helper.

Replacing the ifdefs with a static function in arch/x86/mm/init.c gives
clearer code and allows pgd_index() to be moved to the generic
implementation in include/linux/pgtable.h.
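
For illustration, the resulting helper in arch/x86/mm/init.c could look
roughly like the sketch below. This is based on the description above, not
a verbatim quote of the patch; the names trampoline_pgd_entry, init_top_pgt
and init_trampoline_kaslr() are taken from the surrounding x86 code and
this series and may differ slightly.

	#ifdef CONFIG_X86_64
	static void __init init_trampoline(void)
	{
		/* kaslr off (or KASAN on): copy the direct map's PGD entry */
		if (!kaslr_memory_enabled())
			trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
		else
			init_trampoline_kaslr();	/* map the trampoline at PUD level */
	}
	#else
	static inline void init_trampoline(void) { }	/* 32-bit: nothing to do */
	#endif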

[rppt@linux.ibm.com: take CONFIG_RANDOMIZE_MEMORY into account in kaslr_enabled()]
  Link: http://lkml.kernel.org/r/20200525104045.GB13212@linux.ibm.com

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200514170327.31389-8-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2020-06-09 09:39:13 -07:00

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SETUP_H
#define _ASM_X86_SETUP_H
#include <uapi/asm/setup.h>
#define COMMAND_LINE_SIZE 2048
#include <linux/linkage.h>
#include <asm/page_types.h>
#ifdef __i386__
#include <linux/pfn.h>
/*
 * Reserved space for vmalloc and iomap - defined in asm/page.h
 */
#define MAXMEM_PFN PFN_DOWN(MAXMEM)
#define MAX_NONPAE_PFN (1 << 20)
#endif /* __i386__ */
#define PARAM_SIZE 4096 /* sizeof(struct boot_params) */
#define OLD_CL_MAGIC 0xA33F
#define OLD_CL_ADDRESS 0x020 /* Relative to real mode data */
#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
#ifndef __ASSEMBLY__
#include <asm/bootparam.h>
#include <asm/x86_init.h>
extern u64 relocated_ramdisk;
/* Interrupt control for vSMPowered x86_64 systems */
#ifdef CONFIG_X86_64
void vsmp_init(void);
#else
static inline void vsmp_init(void) { }
#endif
void setup_bios_corruption_check(void);
void early_platform_quirks(void);
extern unsigned long saved_video_mode;
extern void reserve_standard_io_resources(void);
extern void i386_reserve_resources(void);
extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp);
extern unsigned long __startup_secondary_64(void);
extern int early_make_pgtable(unsigned long address);
#ifdef CONFIG_X86_INTEL_MID
extern void x86_intel_mid_early_setup(void);
#else
static inline void x86_intel_mid_early_setup(void) { }
#endif
#ifdef CONFIG_X86_INTEL_CE
extern void x86_ce4100_early_setup(void);
#else
static inline void x86_ce4100_early_setup(void) { }
#endif
#ifndef _SETUP
#include <asm/espfix.h>
#include <linux/kernel.h>
/*
 * This is set up by the setup-routine at boot-time
 */
extern struct boot_params boot_params;
extern char _text[];
static inline bool kaslr_enabled(void)
{
	return IS_ENABLED(CONFIG_RANDOMIZE_MEMORY) &&
		!!(boot_params.hdr.loadflags & KASLR_FLAG);
}
/*
 * Apply no randomization if KASLR was disabled at boot or if KASAN
 * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
 */
static inline bool kaslr_memory_enabled(void)
{
	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

static inline unsigned long kaslr_offset(void)
{
	return (unsigned long)&_text - __START_KERNEL;
}
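/*
 * Illustrative sketch only: reporting code such as dump_kernel_offset()
 * in arch/x86/kernel/setup.c is the sort of caller that consumes this,
 * roughly along the lines of:
 *
 *	if (kaslr_enabled())
 *		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
 *			 kaslr_offset(), __START_KERNEL);
 */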
/*
 * Do NOT EVER look at the BIOS memory size location.
 * It does not work on many machines.
 */
#define LOWMEMSIZE() (0x9f000)
/* exceedingly early brk-like allocator */
extern unsigned long _brk_end;
void *extend_brk(size_t size, size_t align);
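/*
 * Illustrative sketch only: early page-table setup, e.g.
 * early_alloc_pgt_buf() in arch/x86/mm/init.c, pulls page-aligned memory
 * out of the brk area before the normal allocators are available:
 *
 *	void *buf = extend_brk(3 * PAGE_SIZE, PAGE_SIZE);
 */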
/*
 * Reserve space in the brk section. The name must be unique within
 * the file, and somewhat descriptive. The size is in bytes. Must be
 * used at file scope.
 *
 * (This uses a temp function to wrap the asm so we can pass it the
 * size parameter; otherwise we wouldn't be able to. We can't use a
 * "section" attribute on a normal variable because it always ends up
 * being @progbits, which ends up allocating space in the vmlinux
 * executable.)
 */
#define RESERVE_BRK(name,sz) \
	static void __section(.discard.text) __used notrace \
	__brk_reservation_fn_##name##__(void) { \
		asm volatile ( \
			".pushsection .brk_reservation,\"aw\",@nobits;" \
			".brk." #name ":" \
			" 1:.skip %c0;" \
			" .size .brk." #name ", . - 1b;" \
			" .popsection" \
			: : "i" (sz)); \
	}
/* Helper for reserving space for arrays of things */
#define RESERVE_BRK_ARRAY(type, name, entries) \
	type *name; \
	RESERVE_BRK(name, sizeof(type) * entries)
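/*
 * Illustrative sketch only: a typical file-scope reservation, e.g. the
 * DMI scratch area in arch/x86/kernel/setup.c, looks like:
 *
 *	RESERVE_BRK(dmi_alloc, 65536);
 *
 * with chunks later handed out via extend_brk().
 */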
extern void probe_roms(void);
#ifdef __i386__
asmlinkage void __init i386_start_kernel(void);
#else
asmlinkage void __init x86_64_start_kernel(char *real_mode);
asmlinkage void __init x86_64_start_reservations(char *real_mode_data);
#endif /* __i386__ */
#endif /* _SETUP */
#else
#define RESERVE_BRK(name,sz) \
	.pushsection .brk_reservation,"aw",@nobits; \
.brk.name: \
1:	.skip sz; \
	.size .brk.name,.-1b; \
	.popsection
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SETUP_H */