72aff53f1f
The sibling cpu bringup is extremely fragile.  We can only perform the
most basic calls until we take over the trap table from the
firmware/hypervisor on the new cpu.  This means no accesses to %g4,
%g5, %g6 since those can't be TLB translated without our trap handlers.

In order to achieve this:

1) Change sun4v_init_mondo_queues() so that it can operate in several
   modes.  It can allocate the queues, or install them in the current
   processor, or both.  The boot cpu does both in its call early on.
   Later, the boot cpu allocates the sibling cpu queue, starts the
   sibling cpu, then the sibling cpu loads them in.

2) init_cur_cpu_trap() is changed to take the current_thread_info()
   as an argument instead of reading %g6 directly on the current cpu.

3) Create a trampoline stack for the sibling cpus.  We do our basic
   kernel calls using this stack, which is locked into the kernel
   image, then go to our proper thread stack after taking over the
   trap table.

4) While we are in this delicate startup state, we put 0xdeadbeef
   into %g4/%g5/%g6 in order to catch accidental accesses.

5) On the final prom_set_trap_table*() call, we put &init_thread_union
   into %g6.  This is a hack to make prom_world(0) work.  All that
   wants to do is restore the %asi register using
   get_thread_current_ds().

Longer term we should just do the OBP calls to set the trap table by
hand just like we do for everything else.  This would avoid that silly
prom_world(0) issue, then we can remove the init_thread_union hack.

Signed-off-by: David S. Miller <davem@davemloft.net>
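For orientation, here is a rough, hypothetical C-level sketch of how the trampoline below drives the two helpers this change reworks (sun4v_init_mondo_queues() and init_cur_cpu_trap()) once the sibling cpu is running on the locked trampoline stack. The parameter names and the wrapper function are illustrative assumptions; only the argument values (taken from the %o0-%o3 setup in the assembly) and the thread_info argument to init_cur_cpu_trap() come from this change.

struct thread_info;

/* Parameter names are guesses; the trampoline passes (0, cpu, 0, 1). */
extern void sun4v_init_mondo_queues(int use_bootmem, int cpu,
				    int alloc, int load);
extern void init_cur_cpu_trap(struct thread_info *ti);

/* Illustrative only: what the assembly does at the C level. */
static void sibling_cpu_early_calls(struct thread_info *ti, int cpu)
{
	/* The boot cpu already allocated this cpu's mondo queues;
	 * the sibling cpu only loads them into its own registers.
	 */
	sun4v_init_mondo_queues(0, cpu, 0, 1);

	/* %g6 is unusable at this point, so the current thread_info
	 * is passed explicitly.
	 */
	init_cur_cpu_trap(ti);
}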
/* $Id: trampoline.S,v 1.26 2002/02/09 19:49:30 davem Exp $
 * trampoline.S: Jump start slave processors on sparc64.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/pstate.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>

	.data
	.align	8
call_method:
	.asciz	"call-method"
	.align	8
itlb_load:
	.asciz	"SUNW,itlb-load"
	.align	8
dtlb_load:
	.asciz	"SUNW,dtlb-load"

	/* XXX __cpuinit this thing XXX */
#define TRAMP_STACK_SIZE	1024
	.align	16
tramp_stack:
	.skip	TRAMP_STACK_SIZE

	.text
	.align	8
	.globl	sparc64_cpu_startup, sparc64_cpu_startup_end
sparc64_cpu_startup:
	BRANCH_IF_SUN4V(g1, niagara_startup)
	BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup)
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup)

	ba,pt	%xcc, spitfire_startup
	 nop

cheetah_plus_startup:
	/* Preserve OBP chosen DCU and DCR register settings. */
	ba,pt	%xcc, cheetah_generic_startup
	 nop

cheetah_startup:
	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr	%g1, %asr18

	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	or	%g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	sllx	%g5, 32, %g5
	or	%g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
	stxa	%g5, [%g0] ASI_DCU_CONTROL_REG
	membar	#Sync
	/* fallthru */

cheetah_generic_startup:
	mov	TSB_EXTENSION_P, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	mov	TSB_EXTENSION_S, %g3
	stxa	%g0, [%g3] ASI_DMMU
	membar	#Sync

	mov	TSB_EXTENSION_N, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync
	/* fallthru */

niagara_startup:
	/* Disable STICK_INT interrupts. */
	sethi	%hi(0x80000000), %g5
	sllx	%g5, 32, %g5
	wr	%g5, %asr25

	ba,pt	%xcc, startup_continue
	 nop

spitfire_startup:
	mov	(LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
	stxa	%g1, [%g0] ASI_LSU_CONTROL
	membar	#Sync

startup_continue:
	sethi	%hi(0x80000000), %g2
	sllx	%g2, 32, %g2
	wr	%g2, 0, %tick_cmpr
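
	/* The boot cpu passes us, in %o0, the address of this cpu's
	 * thread_info pointer (it is dereferenced via %l0 below, both
	 * for init_cur_cpu_trap() and for the final stack switch).
	 * We cannot use %g6 yet, so keep it in a local register.
	 */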
	mov	%o0, %l0

	BRANCH_IF_SUN4V(g1, niagara_lock_tlb)

	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
	 * We lock 2 consecutive entries if we are 'bigkernel'.
	 */
	sethi	%hi(prom_entry_lock), %g2
1:	ldstub	[%g2 + %lo(prom_entry_lock)], %g1
	membar	#StoreLoad | #StoreStore
	brnz,pn	%g1, 1b
	 nop

	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x10], %l2
	add	%l2, -(192 + 128), %sp
	flushw
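
	/* Argument block for the OBP "call-method" client interface
	 * service, built at %sp + 2047 (stack bias) + 128 (register
	 * window save area).  Slot usage below:
	 *   +0x00  "call-method"
	 *   +0x08  number of input arguments (5)
	 *   +0x10  number of return values (1)
	 *   +0x18  method name ("SUNW,itlb-load" or "SUNW,dtlb-load")
	 *   +0x20  MMU ihandle (prom_mmu_ihandle_cache)
	 *   +0x28  virtual address to lock (KERNBASE, or KERNBASE + 4MB)
	 *   +0x30  TTE data (kern_locked_tte_data)
	 *   +0x38  TLB entry index (15/14 on cheetah, 63/62 otherwise)
	 */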
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(itlb_load), %g2
	or	%g2, %lo(itlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]
	sethi	%hi(KERNBASE), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]
	sethi	%hi(kern_locked_tte_data), %g2
	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]

	mov	15, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov	63, %g2
1:
	stx	%g2, [%sp + 2047 + 128 + 0x38]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	sethi	%hi(bigkernel), %g2
	lduw	[%g2 + %lo(bigkernel)], %g2
	brz,pt	%g2, do_dtlb
	 nop

	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(itlb_load), %g2
	or	%g2, %lo(itlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]
	sethi	%hi(KERNBASE + 0x400000), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]
	sethi	%hi(kern_locked_tte_data), %g2
	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
	sethi	%hi(0x400000), %g1
	add	%g2, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]

	mov	14, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov	62, %g2
1:
	stx	%g2, [%sp + 2047 + 128 + 0x38]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0
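
	/* The instruction TLB entries are locked; now repeat the same
	 * "call-method" sequence with "SUNW,dtlb-load" for the data TLB.
	 */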
do_dtlb:
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(dtlb_load), %g2
	or	%g2, %lo(dtlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]
	sethi	%hi(KERNBASE), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]
	sethi	%hi(kern_locked_tte_data), %g2
	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]

	mov	15, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov	63, %g2
1:

	stx	%g2, [%sp + 2047 + 128 + 0x38]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	sethi	%hi(bigkernel), %g2
	lduw	[%g2 + %lo(bigkernel)], %g2
	brz,pt	%g2, do_unlock
	 nop

	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(dtlb_load), %g2
	or	%g2, %lo(dtlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]
	sethi	%hi(KERNBASE + 0x400000), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]
	sethi	%hi(kern_locked_tte_data), %g2
	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
	sethi	%hi(0x400000), %g1
	add	%g2, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]

	mov	14, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov	62, %g2
1:

	stx	%g2, [%sp + 2047 + 128 + 0x38]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

do_unlock:
	sethi	%hi(prom_entry_lock), %g2
	stb	%g0, [%g2 + %lo(prom_entry_lock)]
	membar	#StoreStore | #StoreLoad

	ba,pt	%xcc, after_lock_tlb
	 nop
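
	/* sun4v path: instead of the OBP calls above, ask the hypervisor
	 * for permanent I-MMU and D-MMU mappings of the kernel image,
	 * plus a second 4MB mapping when 'bigkernel' is set.
	 */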
niagara_lock_tlb:
	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sethi	%hi(KERNBASE), %o0
	clr	%o1
	sethi	%hi(kern_locked_tte_data), %o2
	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
	mov	HV_MMU_IMMU, %o3
	ta	HV_FAST_TRAP

	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sethi	%hi(KERNBASE), %o0
	clr	%o1
	sethi	%hi(kern_locked_tte_data), %o2
	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
	mov	HV_MMU_DMMU, %o3
	ta	HV_FAST_TRAP

	sethi	%hi(bigkernel), %g2
	lduw	[%g2 + %lo(bigkernel)], %g2
	brz,pt	%g2, after_lock_tlb
	 nop

	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sethi	%hi(KERNBASE + 0x400000), %o0
	clr	%o1
	sethi	%hi(kern_locked_tte_data), %o2
	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
	sethi	%hi(0x400000), %o3
	add	%o2, %o3, %o2
	mov	HV_MMU_IMMU, %o3
	ta	HV_FAST_TRAP

	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sethi	%hi(KERNBASE + 0x400000), %o0
	clr	%o1
	sethi	%hi(kern_locked_tte_data), %o2
	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
	sethi	%hi(0x400000), %o3
	add	%o2, %o3, %o2
	mov	HV_MMU_DMMU, %o3
	ta	HV_FAST_TRAP
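
	/* The OBP and hypervisor paths converge here with the kernel
	 * image locked into the TLBs.
	 */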
after_lock_tlb:
	wrpr	%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
	wr	%g0, 0, %fprs

	wr	%g0, ASI_P, %asi

	mov	PRIMARY_CONTEXT, %g7

661:	stxa	%g0, [%g7] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g0, [%g7] ASI_MMU
	.previous

	membar	#Sync
	mov	SECONDARY_CONTEXT, %g7

661:	stxa	%g0, [%g7] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g0, [%g7] ASI_MMU
	.previous

	membar	#Sync

	/* Everything we do here, until we properly take over the
	 * trap table, must be done with extreme care.  We cannot
	 * make any references to %g6 (current thread pointer),
	 * %g4 (current task pointer), or %g5 (base of current cpu's
	 * per-cpu area) until we properly take over the trap table
	 * from the firmware and hypervisor.
	 *
	 * Get onto temporary stack which is in the locked kernel image.
	 */
	sethi	%hi(tramp_stack), %g1
	or	%g1, %lo(tramp_stack), %g1
	add	%g1, TRAMP_STACK_SIZE, %g1
	sub	%g1, STACKFRAME_SZ + STACK_BIAS, %sp
	mov	0, %fp

	/* Put garbage in these registers to trap any access to them. */
	set	0xdeadbeef, %g4
	set	0xdeadbeef, %g5
	set	0xdeadbeef, %g6

	call	init_irqwork_curcpu
	 nop
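
	/* Only sun4v (tlb_type 3, hypervisor) has cpu mondo queues to
	 * register here.  The boot cpu already allocated this cpu's
	 * queues, so the arguments below (%o2 = 0, %o3 = 1) ask
	 * sun4v_init_mondo_queues() to only load them, not allocate.
	 */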
	sethi	%hi(tlb_type), %g3
	lduw	[%g3 + %lo(tlb_type)], %g2
	cmp	%g2, 3
	bne,pt	%icc, 1f
	 nop

	call	hard_smp_processor_id
	 nop

	mov	%o0, %o1
	mov	0, %o0
	mov	0, %o2
	call	sun4v_init_mondo_queues
	 mov	1, %o3
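
	/* init_cur_cpu_trap() takes the thread_info pointer as an
	 * explicit argument (loaded via %l0) because %g6 is still
	 * poisoned with 0xdeadbeef and cannot be read yet.
	 */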
1:	call	init_cur_cpu_trap
	 ldx	[%l0], %o0

	/* Start using proper page size encodings in ctx register. */
	sethi	%hi(sparc64_kern_pri_context), %g3
	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2
	mov	PRIMARY_CONTEXT, %g1

661:	stxa	%g2, [%g1] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g2, [%g1] ASI_MMU
	.previous

	membar	#Sync

	wrpr	%g0, 0, %wstate

	/* As a hack, put &init_thread_union into %g6.
	 * prom_world() loads from here to restore the %asi
	 * register.
	 */
	sethi	%hi(init_thread_union), %g6
	or	%g6, %lo(init_thread_union), %g6

	sethi	%hi(is_sun4v), %o0
	lduw	[%o0 + %lo(is_sun4v)], %o0
	brz,pt	%o0, 1f
	 nop

	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
	add	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
	stxa	%g2, [%g0] ASI_SCRATCHPAD

	/* Compute physical address:
	 *
	 * paddr = kern_base + (mmfsa_vaddr - KERNBASE)
	 */
	sethi	%hi(KERNBASE), %g3
	sub	%g2, %g3, %g2
	sethi	%hi(kern_base), %g3
	ldx	[%g3 + %lo(kern_base)], %g3
	add	%g2, %g3, %o1

	call	prom_set_trap_table_sun4v
	 sethi	%hi(sparc64_ttable_tl0), %o0

	ba,pt	%xcc, 2f
	 nop

1:	call	prom_set_trap_table
	 sethi	%hi(sparc64_ttable_tl0), %o0
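
	/* The kernel's trap table is installed now, so it is finally
	 * safe to set up the real %g6/%g4 and move from the trampoline
	 * stack onto this thread's own kernel stack.
	 */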
2:	ldx	[%l0], %g6
	ldx	[%g6 + TI_TASK], %g4

	mov	1, %g5
	sllx	%g5, THREAD_SHIFT, %g5
	sub	%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
	add	%g6, %g5, %sp
	mov	0, %fp

	rdpr	%pstate, %o1
	or	%o1, PSTATE_IE, %o1
	wrpr	%o1, 0, %pstate

	call	smp_callin
	 nop
	call	cpu_idle
	 mov	0, %o0
	call	cpu_panic
	 nop
1:	b,a,pt	%xcc, 1b

	.align	8
sparc64_cpu_startup_end: