linux-next/arch/sparc/kernel/vmlinux.lds.S
commit 19df0c2fef ("percpu: align percpu readmostly subsection to cacheline")
Author: Tejun Heo <tj@kernel.org>
Date:   2011-01-25 14:26:50 +01:00

Currently the percpu readmostly subsection may share cache lines with other
percpu subsections, which can result in unnecessary cacheline bouncing and
performance degradation.

This patch adds a @cacheline parameter to the PERCPU() and PERCPU_VADDR()
linker macros and makes each arch's linker script specify its cacheline size
and use it to align the percpu subsections.

This is based on Shaohua's x86-only patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Shaohua Li <shaohua.li@intel.com>
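As a rough illustration (simplified; the exact macro lives in
include/asm-generic/vmlinux.lds.h and also defines the __per_cpu_load/start/end
symbols and an AT() load address), the generic PERCPU(cacheline, align) macro
lays out the percpu input sections roughly like this, which is what the
PERCPU(SMP_CACHE_BYTES, PAGE_SIZE) line near the end of this script relies on:

        .data..percpu : {
                *(.data..percpu..first)
                . = ALIGN(PAGE_SIZE);
                *(.data..percpu..page_aligned)
                . = ALIGN(cacheline);         /* added by this patch */
                *(.data..percpu..readmostly)
                . = ALIGN(cacheline);         /* added by this patch */
                *(.data..percpu)
                *(.data..percpu..shared_aligned)
        }

With cacheline = SMP_CACHE_BYTES, the readmostly subsection starts on its own
cache line and the write-heavy subsections that follow cannot share it.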

/* ld script for sparc32/sparc64 kernel */
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#ifdef CONFIG_SPARC32
#define INITIAL_ADDRESS 0x10000 + SIZEOF_HEADERS
#define TEXTSTART 0xf0004000
#define SMP_CACHE_BYTES_SHIFT 5
#else
#define SMP_CACHE_BYTES_SHIFT 6
#define INITIAL_ADDRESS 0x4000
#define TEXTSTART 0x0000000000404000
#endif
#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT)
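/* i.e. 32-byte cache lines on sparc32, 64-byte cache lines on sparc64 */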
#ifdef CONFIG_SPARC32
OUTPUT_FORMAT("elf32-sparc", "elf32-sparc", "elf32-sparc")
OUTPUT_ARCH(sparc)
ENTRY(_start)
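/* sparc32 is big-endian: jiffies aliases the low 32 bits of jiffies_64, which sit 4 bytes in */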
jiffies = jiffies_64 + 4;
#else
/* sparc64 */
OUTPUT_FORMAT("elf64-sparc", "elf64-sparc", "elf64-sparc")
OUTPUT_ARCH(sparc:v9a)
ENTRY(_start)
jiffies = jiffies_64;
#endif
SECTIONS
{
        /* swapper_low_pmd_dir is sparc64 only */
        swapper_low_pmd_dir = 0x0000000000402000;
        . = INITIAL_ADDRESS;
        .text TEXTSTART :
        {
                _text = .;
                HEAD_TEXT
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                IRQENTRY_TEXT
                *(.gnu.warning)
        } = 0
        _etext = .;
        RO_DATA(PAGE_SIZE)
        /* Start of data section */
        _sdata = .;
        .data1 : {
                *(.data1)
        }
        RW_DATA_SECTION(SMP_CACHE_BYTES, 0, THREAD_SIZE)
        /* End of data section */
        _edata = .;
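        /* Out-of-line fixup code for faulting user accesses; __ex_table entries point into this range */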
        .fixup : {
                __start___fixup = .;
                *(.fixup)
                __stop___fixup = .;
        }
        EXCEPTION_TABLE(16)
        NOTES
        . = ALIGN(PAGE_SIZE);
        __init_begin = ALIGN(PAGE_SIZE);
        INIT_TEXT_SECTION(PAGE_SIZE)
        __init_text_end = .;
        INIT_DATA_SECTION(16)
        . = ALIGN(4);
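        /* sparc64 boot-time code patching tables (empty on sparc32): lists of
         * instruction addresses rewritten once the TSB/MMU and CPU-id access
         * methods are known */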
        .tsb_ldquad_phys_patch : {
                __tsb_ldquad_phys_patch = .;
                *(.tsb_ldquad_phys_patch)
                __tsb_ldquad_phys_patch_end = .;
        }
        .tsb_phys_patch : {
                __tsb_phys_patch = .;
                *(.tsb_phys_patch)
                __tsb_phys_patch_end = .;
        }
        .cpuid_patch : {
                __cpuid_patch = .;
                *(.cpuid_patch)
                __cpuid_patch_end = .;
        }
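        /* Single- and double-instruction patches applied when running on sun4v (hypervisor) machines */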
        .sun4v_1insn_patch : {
                __sun4v_1insn_patch = .;
                *(.sun4v_1insn_patch)
                __sun4v_1insn_patch_end = .;
        }
        .sun4v_2insn_patch : {
                __sun4v_2insn_patch = .;
                *(.sun4v_2insn_patch)
                __sun4v_2insn_patch_end = .;
        }
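        /* Per-cpu data; SMP_CACHE_BYTES keeps the ..readmostly subsection cacheline-aligned (see commit message above) */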
        PERCPU(SMP_CACHE_BYTES, PAGE_SIZE)
        . = ALIGN(PAGE_SIZE);
        __init_end = .;
        BSS_SECTION(0, 0, 0)
        _end = . ;
        STABS_DEBUG
        DWARF_DEBUG
        DISCARDS
}