a9ff696160
Making virt_to_pfn() a static inline taking a strongly typed (const void *) makes the contract of passing a pointer of that type to the function explicit, and exposes any misuse of the old macro, which acted polymorphically and accepted many types such as (void *), (uintptr_t) or (unsigned long) as arguments without warning.

Doing this is a bit intrusive: virt_to_pfn() requires PHYS_PFN_OFFSET and PAGE_SHIFT to be defined, and these are defined in <asm/page.h>, so that header must be included *before* <asm/memory.h>. The use of macros was obscuring this unclear inclusion order, as the macros would eventually be resolved, but a static inline like this cannot be compiled with unresolved macros.

The naive solution of including <asm/page.h> at the top of <asm/memory.h> does not work, because <asm/memory.h> sometimes includes <asm/page.h> at the end of itself, which would create a confusing inclusion loop. So instead, take the approach of always unconditionally including <asm/page.h> at the end of <asm/memory.h>.

arch/arm uses <asm/memory.h> explicitly in a lot of places; however, it turns out that if we just unconditionally include <asm/memory.h> into <asm/page.h> and switch all inclusions of <asm/memory.h> to <asm/page.h> instead, we enforce the right order and <asm/memory.h> will always have access to the definitions. Put an inclusion guard in place making it impossible to include <asm/memory.h> explicitly.

Link: https://lore.kernel.org/linux-mm/20220701160004.2ffff4e5ab59a55499f4c736@linux-foundation.org/
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
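In outline, the change replaces a polymorphic macro with a typed inline. A minimal sketch, assuming the usual ARM definition in terms of __pa() and PAGE_SHIFT; the exact patch body and guard wording are not shown on this page, so both snippets below are illustrative, not the literal diff:

    /* Before: a macro silently accepts (void *), (uintptr_t) or (unsigned long) */
    #define virt_to_pfn(kaddr)  (__pa(kaddr) >> PAGE_SHIFT)

    /* After: a typed static inline; a non-pointer argument now draws a warning */
    static inline unsigned long virt_to_pfn(const void *kaddr)
    {
            return __pa(kaddr) >> PAGE_SHIFT;
    }

    /* Hypothetical shape of the guard in <asm/memory.h> rejecting direct inclusion */
    #ifndef _ASMARM_PAGE_H
    #error "Include <asm/page.h> rather than <asm/memory.h> directly"
    #endif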
181 lines
3.4 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else

#include <linux/pgtable.h>
#include <asm/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/mpu.h>

OUTPUT_ARCH(arm)
ENTRY(stext)

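/*
 * jiffies aliases the low 32 bits of the 64-bit jiffies_64 counter:
 * the low word sits at offset 0 on little-endian ARM and offset 4 on
 * big-endian ARM.
 */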
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
        /*
         * XXX: The linker does not define how output sections are
         * assigned to input sections when there are multiple statements
         * matching the same input section name. There is no documented
         * order of matching.
         *
         * unwind exit sections must be discarded before the rest of the
         * unwind sections get included.
         */
        /DISCARD/ : {
                ARM_DISCARD
#ifndef CONFIG_SMP_ON_UP
                *(.alt.smp.init)
#endif
#ifndef CONFIG_ARM_UNWIND
                *(.ARM.exidx) *(.ARM.exidx.*)
                *(.ARM.extab) *(.ARM.extab.*)
#endif
        }

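        /* Link-time start of the image: virtual base plus the kernel's offset into RAM */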
        . = KERNEL_OFFSET + TEXT_OFFSET;
        .head.text : {
                _text = .;
                HEAD_TEXT
        }

#ifdef CONFIG_STRICT_KERNEL_RWX
        . = ALIGN(1<<SECTION_SHIFT);
#endif

#ifdef CONFIG_ARM_MPU
        . = ALIGN(PMSAv8_MINALIGN);
#endif
        .text : {               /* Real text segment */
                _stext = .;     /* Text and read-only data */
                ARM_TEXT
        }

#ifdef CONFIG_DEBUG_ALIGN_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
#endif
        _etext = .;             /* End of text section */

        RO_DATA(PAGE_SIZE)

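        /* Exception fixup table: address pairs consulted by the fault handler */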
        . = ALIGN(4);
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
                __start___ex_table = .;
                ARM_MMU_KEEP(*(__ex_table))
                __stop___ex_table = .;
        }

#ifdef CONFIG_ARM_UNWIND
        ARM_UNWIND_SECTIONS
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
        . = ALIGN(1<<SECTION_SHIFT);
#else
        . = ALIGN(PAGE_SIZE);
#endif
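        /* Memory from __init_begin to __init_end is freed after boot */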
        __init_begin = .;

        ARM_VECTORS
        INIT_TEXT_SECTION(8)
        .exit.text : {
                ARM_EXIT_KEEP(EXIT_TEXT)
        }
        .init.proc.info : {
                ARM_CPU_DISCARD(PROC_INFO)
        }
        .init.arch.info : {
                __arch_info_begin = .;
                *(.arch.info.init)
                __arch_info_end = .;
        }
        .init.tagtable : {
                __tagtable_begin = .;
                *(.taglist.init)
                __tagtable_end = .;
        }
#ifdef CONFIG_SMP_ON_UP
        .init.smpalt : {
                __smpalt_begin = .;
                *(.alt.smp.init)
                __smpalt_end = .;
        }
#endif
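        /* Locations patched at early boot with the phys-to-virt offset */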
        .init.pv_table : {
                __pv_table_begin = .;
                *(.pv_table)
                __pv_table_end = .;
        }

        INIT_DATA_SECTION(16)

        .exit.data : {
                ARM_EXIT_KEEP(EXIT_DATA)
        }

#ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_HAVE_TCM
        ARM_TCM
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
        . = ALIGN(1<<SECTION_SHIFT);
#else
        . = ALIGN(THREAD_ALIGN);
#endif
        __init_end = .;

        _sdata = .;
        RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
        _edata = .;

        BSS_SECTION(0, 0, 0)
#ifdef CONFIG_ARM_MPU
        . = ALIGN(PMSAv8_MINALIGN);
#endif
        _end = .;

        STABS_DEBUG
        DWARF_DEBUG
        ARM_DETAILS

        ARM_ASSERTS
}

#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
 * be the first section-aligned location after __start_rodata. Otherwise,
 * it will be equal to __start_rodata.
 */
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
#ifndef CONFIG_COMPILE_TEST
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
#endif

#endif /* CONFIG_XIP_KERNEL */