x86: rework __per_cpu_load adjustments

Impact: cleanup

Use the per-cpu cpu_number variable to determine whether the __per_cpu_load
adjustment is necessary, instead of testing early_gdt_descr_base against zero.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Authored by Brian Gerst on 2009-01-19 12:21:28 +09:00; committed by Tejun Heo
parent 0bd74fa8e2
commit 8c7e58e690

--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -207,19 +207,15 @@ ENTRY(secondary_startup_64)
 #ifdef CONFIG_SMP
 	/*
-	 * early_gdt_base should point to the gdt_page in static percpu init
-	 * data area.  Computing this requires two symbols - __per_cpu_load
-	 * and per_cpu__gdt_page.  As linker can't do no such relocation, do
-	 * it by hand.  As early_gdt_descr is manipulated by C code for
-	 * secondary CPUs, this should be done only once for the boot CPU
-	 * when early_gdt_descr_base contains zero.
+	 * Fix up static pointers that need __per_cpu_load added.  The assembler
+	 * is unable to do this directly.  This is only needed for the boot cpu.
+	 * These values are set up with the correct base addresses by C code for
+	 * secondary cpus.
 	 */
-	movq	early_gdt_descr_base(%rip), %rax
-	testq	%rax, %rax
-	jnz	1f
-	movq	$__per_cpu_load, %rax
-	addq	$per_cpu__gdt_page, %rax
-	movq	%rax, early_gdt_descr_base(%rip)
+	movq	initial_gs(%rip), %rax
+	cmpl	$0, per_cpu__cpu_number(%rax)
+	jne	1f
+	addq	%rax, early_gdt_descr_base(%rip)
 1:
 #endif
 	/*
@@ -431,12 +427,8 @@ NEXT_PAGE(level2_spare_pgt)
 	.globl early_gdt_descr
 early_gdt_descr:
 	.word	GDT_ENTRIES*8-1
-#ifdef CONFIG_SMP
 early_gdt_descr_base:
-	.quad	0x0000000000000000
-#else
 	.quad	per_cpu__gdt_page
-#endif
 
 ENTRY(phys_base)
 	/* This must match the first entry in level2_kernel_pgt */
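
For readers not fluent in the assembly, below is a minimal, standalone C sketch of the
control flow the new lines implement. The names secondary_startup, PER_CPU_LOAD and
GDT_PAGE_OFFSET are illustrative stand-ins, not kernel symbols, and the constants are
made up; in the real code the check reads per_cpu__cpu_number through the per-cpu base
loaded from initial_gs.

#include <stdio.h>

/* Pretend values standing in for __per_cpu_load and per_cpu__gdt_page;
 * both the names and the numbers are illustrative, not taken from a real kernel. */
static const unsigned long long PER_CPU_LOAD = 0xffffffff80a00000ULL;
static const unsigned long long GDT_PAGE_OFFSET = 0x4000ULL;

/* Starts out holding only the per-cpu offset, like the ".quad per_cpu__gdt_page"
 * that now initializes early_gdt_descr_base unconditionally. */
static unsigned long long early_gdt_descr_base;

/* Mirrors the new head_64.S logic: take the per-cpu base (initial_gs), check this
 * CPU's cpu_number, and only the boot CPU performs the one-time fixup. */
static void secondary_startup(unsigned long long gs_base, int cpu_number)
{
	if (cpu_number != 0)
		return;                      /* secondary CPU: C code already stored a full address */
	early_gdt_descr_base += gs_base; /* boot CPU: add __per_cpu_load exactly once */
}

int main(void)
{
	early_gdt_descr_base = GDT_PAGE_OFFSET;
	secondary_startup(PER_CPU_LOAD, 0);  /* boot CPU path triggers the adjustment */
	printf("early_gdt_descr_base = %#llx\n", early_gdt_descr_base);
	return 0;
}

The design point is that cpu_number is a property of the CPU itself, so the boot path
no longer needs a zero sentinel in early_gdt_descr_base, which is what lets the
#ifdef CONFIG_SMP / #else initialization collapse to a single .quad.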