mirror of https://github.com/u-boot/u-boot.git
x86: Add support for starting 64-bit kernel
Add code to jump to a 64-bit Linux kernel. We need to set up a flat page table structure, a new GDT and then go through a few hoops in the right order.

Signed-off-by: Simon Glass <sjg@chromium.org>
parent 92cc94a1fe
commit 200182a748
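For orientation, here is a minimal sketch of how a boot path might invoke the new entry point once a 64-bit kernel and its setup block have been loaded. The wrapper function and the error-code choice are illustrative only; cpu_has_64bit() and cpu_jump_to_64bit() are the pieces that exist in this tree:

#include <common.h>
#include <errno.h>
#include <asm/cpu.h>

/* Illustrative caller - not part of this commit */
static int boot_kernel_64bit(ulong setup_base, ulong entry_64bit)
{
	if (!cpu_has_64bit())
		return -ENOSYS;		/* CPU cannot enter long mode */

	/*
	 * Builds the identity-mapped page tables and jumps via
	 * cpu_call64(); only returns if the jump fails.
	 */
	return cpu_jump_to_64bit(setup_base, entry_64bit);
}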
arch/x86/cpu/Makefile
@@ -10,4 +10,4 @@
 extra-y	= start.o
 obj-$(CONFIG_X86_RESET_VECTOR) += resetvec.o start16.o
-obj-y	+= interrupts.o cpu.o
+obj-y	+= interrupts.o cpu.o call64.o
arch/x86/cpu/call64.S (new file, 93 lines)
@@ -0,0 +1,93 @@
/*
 * (C) Copyright 2014 Google, Inc
 * Copyright (C) 1991, 1992, 1993 Linus Torvalds
 *
 * Parts of this copied from Linux arch/x86/boot/compressed/head_64.S
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm/global_data.h>
#include <asm/msr-index.h>
#include <asm/processor-flags.h>

.code32
.globl cpu_call64
cpu_call64:
	/*
	 * cpu_call64(ulong pgtable, ulong setup_base, ulong target)
	 *
	 * eax - pgtable
	 * edx - setup_base
	 * ecx - target
	 */
	cli
	push	%ecx		/* arg2 = target */
	push	%edx		/* arg1 = setup_base */
	mov	%eax, %ebx

	/* Load new GDT with the 64-bit segments using 32-bit descriptor */
	leal	gdt, %eax
	movl	%eax, gdt+2
	lgdt	gdt

	/* Enable PAE mode */
	movl	$(X86_CR4_PAE), %eax
	movl	%eax, %cr4

	/* Enable the boot page tables */
	leal	(%ebx), %eax
	movl	%eax, %cr3

	/* Enable Long mode in EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr

	/* After gdt is loaded */
	xorl	%eax, %eax
	lldt	%ax
	movl	$0x20, %eax
	ltr	%ax

	/*
	 * Setup for the jump to 64-bit mode
	 *
	 * When the jump is performed we will be in long mode but
	 * in 32-bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
	 * (and in turn EFER.LMA = 1). To jump into 64-bit mode we use
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 * We place all of the values on our mini stack so lret can be
	 * used to perform that far jump. See the gdt below.
	 */
	pop	%esi			/* setup_base */

	pushl	$0x10
	leal	lret_target, %eax
	pushl	%eax

	/* Enter paged protected mode, activating long mode */
	movl	$(X86_CR0_PG | X86_CR0_PE), %eax
	movl	%eax, %cr0

	/* Jump from 32-bit compatibility mode into 64-bit mode. */
	lret

code64:
lret_target:
	pop	%eax			/* target */
	mov	%eax, %eax		/* Clear bits 63:32 */
	jmp	*%eax			/* Jump to the 64-bit target */

	.data
gdt:
	.word	gdt_end - gdt
	.long	gdt
	.word	0
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x0080890000000000	/* TS descriptor */
	.quad	0x0000000000000000	/* TS continued */
gdt_end:
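As a reader's aid (not part of the commit): the GDT above begins with its own 8-byte pseudo-descriptor (size word, base, pad word), so the NULL descriptor sits at offset 0x08, __KERNEL_CS at 0x10, __KERNEL_DS at 0x18 and the TSS descriptor at 0x20. That is why the code pushes $0x10 as the far-return CS and loads $0x20 into the task register. Decoding the two main descriptor values under the standard x86 segment-descriptor layout:

/* Informal decoding of the descriptor values above (illustration only) */
#define GDT_KERNEL_CS	0x00af9a000000ffffULL	/* base 0, limit 0xfffff, 4 KiB granularity;
						 * access 0x9a = present ring-0 code segment;
						 * L = 1: 64-bit code */
#define GDT_KERNEL_DS	0x00cf92000000ffffULL	/* base 0, limit 0xfffff, 4 KiB granularity;
						 * access 0x92 = present ring-0 data segment;
						 * D/B = 1: 32-bit stack/data */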
arch/x86/cpu/cpu.c
@@ -18,7 +18,10 @@

#include <common.h>
#include <command.h>
#include <errno.h>
#include <malloc.h>
#include <asm/control_regs.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/interrupt.h>
@@ -339,3 +342,45 @@ int print_cpuinfo(void)

	return 0;
}

#define PAGETABLE_SIZE		(6 * 4096)

/**
 * build_pagetable() - build a flat 4GiB page table structure for 64-bit mode
 *
 * @pgtable: Pointer to a 24KiB block of memory
 */
static void build_pagetable(uint32_t *pgtable)
{
	uint i;

	memset(pgtable, '\0', PAGETABLE_SIZE);

	/* Level 4 needs a single entry */
	pgtable[0] = (uint32_t)&pgtable[1024] + 7;

	/* Level 3 has one 64-bit entry for each GiB of memory */
	for (i = 0; i < 4; i++) {
		pgtable[1024 + i * 2] = (uint32_t)&pgtable[2048] +
							0x1000 * i + 7;
	}

	/* Level 2 has 2048 64-bit entries, each representing 2MiB */
	for (i = 0; i < 2048; i++)
		pgtable[2048 + i * 2] = 0x183 + (i << 21UL);
}

int cpu_jump_to_64bit(ulong setup_base, ulong target)
{
	uint32_t *pgtable;

	pgtable = memalign(4096, PAGETABLE_SIZE);
	if (!pgtable)
		return -ENOMEM;

	build_pagetable(pgtable);
	cpu_call64((ulong)pgtable, setup_base, target);
	free(pgtable);

	return -EFAULT;
}
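The table built by build_pagetable() is a flat identity map of the first 4 GiB using 2 MiB pages: one PML4 entry, four PDPT entries (one per GiB) and 2048 page-directory entries. The low bits are 7 (present | writable | user) on the upper levels and 0x183 (present | writable | page-size | global) on the 2 MiB leaf entries. A small, purely illustrative check of the leaf-entry arithmetic (the helper name is made up):

#include <stdint.h>

/*
 * Mirror the level-2 entry calculation from build_pagetable() and show
 * that it identity-maps any address below 4 GiB (illustration only).
 */
static uint64_t flat_map_lookup(uint64_t vaddr)
{
	uint64_t i = vaddr >> 21;		/* page-directory index (2 MiB pages) */
	uint64_t entry = 0x183 + (i << 21);	/* value build_pagetable() writes */

	/* Strip the flag bits, add the offset within the 2 MiB page */
	return (entry & ~0x1fffffULL) + (vaddr & 0x1fffff);	/* == vaddr */
}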
arch/x86/include/asm/cpu.h
@@ -26,4 +26,30 @@ void cpu_disable_paging_pae(void);
 */
int cpu_has_64bit(void);

/**
 * cpu_call64() - Jump to a 64-bit Linux kernel (internal function)
 *
 * The kernel is uncompressed and the 64-bit entry point is expected to be
 * at @target.
 *
 * This function is used internally - see cpu_jump_to_64bit() for a more
 * useful function.
 *
 * @pgtable: Address of 24KB area containing the page table
 * @setup_base: Pointer to the setup.bin information for the kernel
 * @target: Pointer to the start of the kernel image
 */
void cpu_call64(ulong pgtable, ulong setup_base, ulong target);

/**
 * cpu_jump_to_64bit() - Jump to a 64-bit Linux kernel
 *
 * The kernel is uncompressed and the 64-bit entry point is expected to be
 * at @target.
 *
 * @setup_base: Pointer to the setup.bin information for the kernel
 * @target: Pointer to the start of the kernel image
 */
int cpu_jump_to_64bit(ulong setup_base, ulong target);

#endif
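The "24KB area" mentioned above matches PAGETABLE_SIZE (6 * 4096) in arch/x86/cpu/cpu.c: one 4 KiB page for the PML4, one for the PDPT and four for the page directories covering 4 GiB with 2 MiB entries. A throwaway sanity check of that arithmetic, for illustration only:

#include <assert.h>

#define PAGE_SIZE	4096
#define PAGETABLE_SIZE	(6 * 4096)	/* as defined in arch/x86/cpu/cpu.c */

int main(void)
{
	/* 1 PML4 page + 1 PDPT page + 4 page-directory pages */
	assert(PAGETABLE_SIZE == (1 + 1 + 4) * PAGE_SIZE);
	/* 4 GiB / 2 MiB = 2048 leaf entries; 8 bytes each = 4 pages */
	assert(2048 * 8 == 4 * PAGE_SIZE);
	return 0;
}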