mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-29 15:43:59 +08:00
714d8e7e27
The main change here is a significant head.S rework that allows us to boot on machines with physical memory at a really high address without having to increase our mapped VA range. Other changes include: - AES performance boost for Cortex-A57 - AArch32 (compat) userspace with 64k pages - Cortex-A53 erratum workaround for #845719 - defconfig updates (new platforms, PCI, ...) -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQEcBAABCgAGBQJVLnQpAAoJELescNyEwWM03RIH/iwcDc0MBZgkwfD5cnY+29p4 m89lMDo3SyGQT4NynHSw7P3R7c3zULmI+9hmJMw/yfjjjL6m7X+vVAF3xj1Am4Al OzCqYLHyFnlRktzJ6dWeF1Ese7tWqPpxn+OCXgYNpz/r5MfF/HhlyX/qNzAQPKrw ZpDvnt44DgUfweqjTbwQUg2wkyCRjmz57MQYxDcmJStdpHIu24jWOvDIo3OJGjyS L49I9DU6DGUhkISZmmBE0T7vmKMD1BcgI7OIzX2WIqn521QT+GSLMhRxaHmK1s1V A8gaMTwpo0xFhTAt7sbw/5+2663WmfRdZI+FtduvORsoxX6KdDn7DH1NQixIm8s= =+F0I -----END PGP SIGNATURE----- Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux Pull arm64 updates from Will Deacon: "Here are the core arm64 updates for 4.1. Highlights include a significant rework to head.S (allowing us to boot on machines with physical memory at a really high address), an AES performance boost on Cortex-A57 and the ability to run a 32-bit userspace with 64k pages (although this requires said userspace to be built with a recent binutils). The head.S rework spilt over into KVM, so there are some changes under arch/arm/ which have been acked by Marc Zyngier (KVM co-maintainer). In particular, the linker script changes caused us some issues in -next, so there are a few merge commits where we had to apply fixes on top of a stable branch. 
Other changes include: - AES performance boost for Cortex-A57 - AArch32 (compat) userspace with 64k pages - Cortex-A53 erratum workaround for #845719 - defconfig updates (new platforms, PCI, ...)" * tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (39 commits) arm64: fix midr range for Cortex-A57 erratum 832075 arm64: errata: add workaround for cortex-a53 erratum #845719 arm64: Use bool function return values of true/false not 1/0 arm64: defconfig: updates for 4.1 arm64: Extract feature parsing code from cpu_errata.c arm64: alternative: Allow immediate branch as alternative instruction arm64: insn: Add aarch64_insn_decode_immediate ARM: kvm: round HYP section to page size instead of log2 upper bound ARM: kvm: assert on HYP section boundaries not actual code size arm64: head.S: ensure idmap_t0sz is visible arm64: pmu: add support for interrupt-affinity property dt: pmu: extend ARM PMU binding to allow for explicit interrupt affinity arm64: head.S: ensure visibility of page tables arm64: KVM: use ID map with increased VA range if required arm64: mm: increase VA range of identity map ARM: kvm: implement replacement for ld's LOG2CEIL() arm64: proc: remove unused cpu_get_pgd macro arm64: enforce x1|x2|x3 == 0 upon kernel entry as per boot protocol arm64: remove __calc_phys_offset arm64: merge __enable_mmu and __turn_mmu_on ...
354 lines
7.1 KiB
ArmAsm
354 lines
7.1 KiB
ArmAsm
/* ld script to make ARM Linux kernel
|
|
* taken from the i386 version by Russell King
|
|
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
|
|
*/
|
|
|
|
#include <asm-generic/vmlinux.lds.h>
|
|
#include <asm/cache.h>
|
|
#include <asm/thread_info.h>
|
|
#include <asm/memory.h>
|
|
#include <asm/page.h>
|
|
#ifdef CONFIG_ARM_KERNMEM_PERMS
|
|
#include <asm/pgtable.h>
|
|
#endif
|
|
|
|
/*
 * Gather all CPU ".proc.info.init" records into one contiguous run,
 * bracketed by __proc_info_begin/__proc_info_end so boot code can scan
 * the table.  Pulled into .text or discarded via ARM_CPU_KEEP/DISCARD
 * below, depending on CONFIG_HOTPLUG_CPU.
 */
#define PROC_INFO							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
	*(.proc.info.init)						\
	VMLINUX_SYMBOL(__proc_info_end) = .;
|
|
|
|
/*
 * Collect identity-map text (code placed in .idmap.text, expected to run
 * with a 1:1 VA==PA mapping, e.g. around MMU enable/disable) between
 * __idmap_text_start/_end markers.  The HYP idmap text that follows is
 * aligned up to a page boundary first; the ASSERT at the end of this
 * script then checks the HYP range fits within a single page.
 */
#define IDMAP_TEXT							\
	ALIGN_FUNCTION();						\
	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
	*(.idmap.text)							\
	VMLINUX_SYMBOL(__idmap_text_end) = .;				\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;			\
	*(.hyp.idmap.text)						\
	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
|
|
|
|
/*
 * Complementary keep/discard selectors for per-CPU init code and data
 * (e.g. the PROC_INFO table and .ARM.ex*.cpuexit.text below).  With CPU
 * hotplug enabled this material must survive past boot for CPUs plugged
 * in later, so KEEP expands to its argument; without hotplug it can be
 * discarded at link time instead.
 */
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)		x
#else
#define ARM_CPU_DISCARD(x)	x
#define ARM_CPU_KEEP(x)
#endif
|
|
|
|
/*
 * Complementary keep/discard selectors for .exit.* text/data.  Exit
 * sections are normally discardable at link time, but are kept when
 * (SMP_ON_UP && !DEBUG_SPINLOCK) or GENERIC_BUG is configured —
 * presumably because SMP alternative patching and bug tables may
 * reference addresses inside exit code (NOTE(review): rationale
 * inferred from the config names; confirm against Kconfig help).
 */
#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
	defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x)	x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)	x
#endif
|
|
|
|
OUTPUT_ARCH(arm)
/* Kernel entry point: the stext label in head.S. */
ENTRY(stext)

/*
 * Alias the 32-bit 'jiffies' onto the 64-bit jiffies_64 counter:
 * the low word sits at offset 0 on little-endian and at offset 4 on
 * big-endian (__ARMEB__), hence the conditional +4.
 */
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif
|
|
|
|
SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name.  There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		*(.ARM.exidx.exit.text)
		*(.ARM.extab.exit.text)
		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
#ifndef CONFIG_MMU
		/* No MMU: fault fixup machinery is unused, drop it. */
		*(.text.fixup)
		*(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
		/* No SMP-on-UP patching: the alternatives are dead weight. */
		*(.alt.smp.init)
#endif
		*(.discard)
		*(.discard.*)
	}

	/*
	 * Set the kernel's link-time start address: the XIP flash
	 * virtual address for execute-in-place kernels, otherwise the
	 * usual RAM mapping at PAGE_OFFSET + TEXT_OFFSET.
	 */
#ifdef CONFIG_XIP_KERNEL
	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
#else
	. = PAGE_OFFSET + TEXT_OFFSET;
#endif
	/* Boot entry code from head.S comes first; _text marks the start. */
	.head.text : {
		_text = .;
		HEAD_TEXT
	}

#ifdef CONFIG_ARM_KERNMEM_PERMS
	/* Section-align so head text can get distinct page permissions. */
	. = ALIGN(1<<SECTION_SHIFT);
#endif

	.text : {			/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
		IDMAP_TEXT
		__exception_text_start = .;
		*(.exception.text)
		__exception_text_end = .;
		IRQENTRY_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		*(.gnu.warning)
		/* ARM/Thumb interworking veneers emitted by the linker. */
		*(.glue_7)
		*(.glue_7t)
		. = ALIGN(4);
		*(.got)			/* Global offset table		*/
		ARM_CPU_KEEP(PROC_INFO)
	}

#ifdef CONFIG_DEBUG_RODATA
	/* Section-align so rodata can be mapped read-only separately. */
	. = ALIGN(1<<SECTION_SHIFT);
#endif
	RO_DATA(PAGE_SIZE)

	/* Exception fixup table used by the fault handler (MMU only). */
	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
#ifdef CONFIG_MMU
		*(__ex_table)
#endif
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	/*
	 * Stack unwinding tables
	 */
	. = ALIGN(8);
	.ARM.unwind_idx : {
		__start_unwind_idx = .;
		*(.ARM.exidx*)
		__stop_unwind_idx = .;
	}
	.ARM.unwind_tab : {
		__start_unwind_tab = .;
		*(.ARM.extab*)
		__stop_unwind_tab = .;
	}
#endif

	NOTES

	_etext = .;			/* End of text and rodata section */

#ifndef CONFIG_XIP_KERNEL
# ifdef CONFIG_ARM_KERNMEM_PERMS
	. = ALIGN(1<<SECTION_SHIFT);
# else
	. = ALIGN(PAGE_SIZE);
# endif
	__init_begin = .;
#endif
	/*
	 * The vectors and stubs are relocatable code, and the
	 * only thing that matters is their relative offsets.
	 * Both are linked at VMA 0/0x1000 but loaded (AT) at their
	 * position in the kernel image, to be copied into place at boot.
	 */
	__vectors_start = .;
	.vectors 0 : AT(__vectors_start) {
		*(.vectors)
	}
	/* Re-sync the location counter after the off-image VMA above. */
	. = __vectors_start + SIZEOF(.vectors);
	__vectors_end = .;

	__stubs_start = .;
	.stubs 0x1000 : AT(__stubs_start) {
		*(.stubs)
	}
	. = __stubs_start + SIZEOF(.stubs);
	__stubs_end = .;

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	/* Without hotplug, PROC_INFO lives here so it is freed with init. */
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	/* Machine descriptor table (struct machine_desc records). */
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	/* ATAG parser table for legacy boot-tag handling. */
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
#ifdef CONFIG_SMP_ON_UP
	/* Alternative-instruction sites patched when running on UP. */
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		__smpalt_end = .;
	}
#endif
	/* Fixup table for runtime phys-to-virt offset patching. */
	.init.pv_table : {
		__pv_table_begin = .;
		*(.pv_table)
		__pv_table_end = .;
	}
	.init.data : {
#ifndef CONFIG_XIP_KERNEL
		/* For XIP, INIT_DATA instead lands in .data (see below). */
		INIT_DATA
#endif
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}
#ifndef CONFIG_XIP_KERNEL
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}
#endif

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

	/*
	 * For XIP, .data is linked at its RAM address (PAGE_OFFSET +
	 * TEXT_OFFSET) but stored in flash at __data_loc; boot code
	 * copies it out.  Otherwise VMA and LMA coincide here.
	 */
#ifdef CONFIG_XIP_KERNEL
	__data_loc = ALIGN(4);		/* location in binary */
	. = PAGE_OFFSET + TEXT_OFFSET;
#else
#ifdef CONFIG_ARM_KERNMEM_PERMS
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(THREAD_SIZE);
#endif
	__init_end = .;
	__data_loc = .;
#endif

	.data : AT(__data_loc) {
		_data = .;		/* address in memory */
		_sdata = .;

		/*
		 * first, the init task union, aligned
		 * to an 8192 byte boundary.
		 */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_XIP_KERNEL
		/* XIP keeps init data in RAM-resident .data (flash is RO). */
		. = ALIGN(PAGE_SIZE);
		__init_begin = .;
		INIT_DATA
		ARM_EXIT_KEEP(EXIT_DATA)
		. = ALIGN(PAGE_SIZE);
		__init_end = .;
#endif

		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		READ_MOSTLY_DATA(L1_CACHE_BYTES)

		/*
		 * and the usual data section
		 */
		DATA_DATA
		CONSTRUCTORS

		_edata = .;
	}
	/* End of the initialized-data image in the binary (LMA space). */
	_edata_loc = __data_loc + SIZEOF(.data);

#ifdef CONFIG_HAVE_TCM
	/*
	 * We align everything to a page boundary so we can
	 * free it after init has commenced and TCM contents have
	 * been copied to its destination.
	 */
	.tcm_start : {
		. = ALIGN(PAGE_SIZE);
		__tcm_start = .;
		__itcm_start = .;
	}

	/*
	 * Link these to the ITCM RAM
	 * Put VMA to the TCM address and LMA to the common RAM
	 * and we'll upload the contents from RAM to TCM and free
	 * the used RAM after that.
	 */
	.text_itcm ITCM_OFFSET : AT(__itcm_start)
	{
		__sitcm_text = .;
		*(.tcm.text)
		*(.tcm.rodata)
		. = ALIGN(4);
		__eitcm_text = .;
	}

	/*
	 * Reset the dot pointer, this is needed to create the
	 * relative __dtcm_start below (to be used as extern in code).
	 */
	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

	.dtcm_start : {
		__dtcm_start = .;
	}

	/* TODO: add remainder of ITCM as well, that can be used for data! */
	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
	{
		. = ALIGN(4);
		__sdtcm_data = .;
		*(.tcm.data)
		. = ALIGN(4);
		__edtcm_data = .;
	}

	/* Reset the dot pointer or the linker gets confused */
	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

	/* End marker for freeing TCM copy in linked object */
	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
		. = ALIGN(PAGE_SIZE);
		__tcm_end = .;
	}
#endif

	BSS_SECTION(0, 0, 0)
	_end = .;

	STABS_DEBUG
}
|
|
|
|
/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The above comment applies as well.
 *
 * Masking the start address down to its page base and requiring the end
 * to be within PAGE_SIZE of it guarantees the whole
 * __hyp_idmap_text_start..__hyp_idmap_text_end range lies inside one page.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")
|