ppc: Use split I/D mmu modes to avoid flushes on interrupts
We rework the way the MMU indices are calculated, providing separate indices for I and D side based on MSR:IR and MSR:DR respectively, and thus no longer need to flush the TLB on context changes. This also adds correct support for HV as a separate address space.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
parent 5fd1111b20
commit 9fb0449114
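Before the diff itself, here is a minimal, self-contained sketch of the server-processor encoding the patch introduces in hreg_compute_mem_idx(). It is illustration only, not part of the patch: the helper name compute_mmu_idx and its bool arguments are invented for this sketch. The point is that user mode, kernel virtual mode and kernel real mode each get their own MMU index, computed independently for the instruction (MSR:IR) and data (MSR:DR) sides, with HV offset into a separate set, so clearing IR/DR on interrupt entry no longer forces a TLB flush.

/* Illustration only -- simplified from the patch; helper name and
 * arguments are invented for this sketch. */
#include <stdbool.h>

/* Server-processor encoding: 0/1/2 = guest user / kernel virtual /
 * kernel real mode, 3/4/5 = the same three modes under HV.  BookE uses
 * a separate 8-entry encoding, hence NB_MMU_MODES is raised to 8. */
static int compute_mmu_idx(bool pr, bool relocate, bool hv)
{
    int idx;

    if (pr) {
        idx = 0;                /* user space: IR/DR are ignored       */
    } else {
        idx = relocate ? 1 : 2; /* kernel: virtual mode vs. real mode  */
    }
    if (hv) {
        idx += 3;               /* hypervisor state gets its own set   */
    }
    return idx;
}

/* The I-side index is computed from MSR:IR and the D-side index from
 * MSR:DR, so cpu_mmu_index(env, ifetch) simply returns immu_idx or
 * dmmu_idx -- no TLB flush is needed when translation is toggled. */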
target-ppc/cpu.h
@@ -359,6 +359,8 @@ struct ppc_slb_t {
 #define MSR_EP  6  /* Exception prefix on 601 */
 #define MSR_IR  5  /* Instruction relocate */
 #define MSR_DR  4  /* Data relocate */
+#define MSR_IS  5  /* Instruction address space (BookE) */
+#define MSR_DS  4  /* Data address space (BookE) */
 #define MSR_PE  3  /* Protection enable on 403 */
 #define MSR_PX  2  /* Protection exclusive on 403 x */
 #define MSR_PMM 2  /* Performance monitor mark on POWER x */
@@ -410,6 +412,8 @@ struct ppc_slb_t {
 #define msr_ep  ((env->msr >> MSR_EP)  & 1)
 #define msr_ir  ((env->msr >> MSR_IR)  & 1)
 #define msr_dr  ((env->msr >> MSR_DR)  & 1)
+#define msr_is  ((env->msr >> MSR_IS)  & 1)
+#define msr_ds  ((env->msr >> MSR_DS)  & 1)
 #define msr_pe  ((env->msr >> MSR_PE)  & 1)
 #define msr_px  ((env->msr >> MSR_PX)  & 1)
 #define msr_pmm ((env->msr >> MSR_PMM) & 1)
@@ -889,7 +893,7 @@ struct ppc_segment_page_sizes {

 /*****************************************************************************/
 /* The whole PowerPC CPU context */
-#define NB_MMU_MODES 3
+#define NB_MMU_MODES 8

 #define PPC_CPU_OPCODES_LEN          0x40
 #define PPC_CPU_INDIRECT_OPCODES_LEN 0x20
@@ -1053,7 +1057,8 @@ struct CPUPPCState {
     /* Those resources are used only in QEMU core */
     target_ulong hflags;      /* hflags is a MSR & HFLAGS_MASK */
     target_ulong hflags_nmsr; /* specific hflags, not coming from MSR */
-    int mmu_idx;     /* precomputed MMU index to speed up mem accesses */
+    int immu_idx;    /* precomputed MMU index to speed up insn access */
+    int dmmu_idx;    /* precomputed MMU index to speed up data accesses */

     /* Power management */
     int (*check_pow)(CPUPPCState *env);
@@ -1245,7 +1250,7 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
 #define MMU_USER_IDX 0
 static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
 {
-    return env->mmu_idx;
+    return ifetch ? env->immu_idx : env->dmmu_idx;
 }

 #include "exec/cpu-all.h"
target-ppc/excp_helper.c
@@ -646,9 +646,6 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)

     if (env->spr[SPR_LPCR] & LPCR_AIL) {
         new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
-    } else if (msr & ((1 << MSR_IR) | (1 << MSR_DR))) {
-        /* If we disactivated any translation, flush TLBs */
-        tlb_flush(cs, 1);
     }

 #ifdef TARGET_PPC64
@@ -721,14 +718,6 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
     /* Reset exception state */
     cs->exception_index = POWERPC_EXCP_NONE;
     env->error_code = 0;
-
-    if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
-        (env->mmu_model == POWERPC_MMU_BOOKE206)) {
-        /* XXX: The BookE changes address space when switching modes,
-                we should probably implement that as different MMU indexes,
-                but for the moment we do it the slow way and flush all. */
-        tlb_flush(cs, 1);
-    }
 }

 void ppc_cpu_do_interrupt(CPUState *cs)
target-ppc/helper_regs.h
@@ -41,11 +41,50 @@ static inline void hreg_swap_gpr_tgpr(CPUPPCState *env)

 static inline void hreg_compute_mem_idx(CPUPPCState *env)
 {
-    /* Precompute MMU index */
-    if (msr_pr == 0 && msr_hv != 0) {
-        env->mmu_idx = 2;
+    /* This is our encoding for server processors
+     *
+     *   0 = Guest User space virtual mode
+     *   1 = Guest Kernel space virtual mode
+     *   2 = Guest Kernel space real mode
+     *   3 = HV User space virtual mode
+     *   4 = HV Kernel space virtual mode
+     *   5 = HV Kernel space real mode
+     *
+     * The combination PR=1 IR&DR=0 is invalid, we will treat
+     * it as IR=DR=1
+     *
+     * For BookE, we need 8 MMU modes as follow:
+     *
+     *  0 = AS 0 HV User space
+     *  1 = AS 0 HV Kernel space
+     *  2 = AS 1 HV User space
+     *  3 = AS 1 HV Kernel space
+     *  4 = AS 0 Guest User space
+     *  5 = AS 0 Guest Kernel space
+     *  6 = AS 1 Guest User space
+     *  7 = AS 1 Guest Kernel space
+     */
+    if (env->mmu_model & POWERPC_MMU_BOOKE) {
+        env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1;
+        env->immu_idx += msr_is ? 2 : 0;
+        env->dmmu_idx += msr_ds ? 2 : 0;
+        env->immu_idx += msr_gs ? 4 : 0;
+        env->dmmu_idx += msr_gs ? 4 : 0;
     } else {
-        env->mmu_idx = 1 - msr_pr;
+        /* First calucalte a base value independent of HV */
+        if (msr_pr != 0) {
+            /* User space, ignore IR and DR */
+            env->immu_idx = env->dmmu_idx = 0;
+        } else {
+            /* Kernel, setup a base I/D value */
+            env->immu_idx = msr_ir ? 1 : 2;
+            env->dmmu_idx = msr_dr ? 1 : 2;
+        }
+        /* Then offset it for HV */
+        if (msr_hv) {
+            env->immu_idx += 3;
+            env->dmmu_idx += 3;
+        }
     }
 }

@@ -82,9 +121,10 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
     }
     if (((value >> MSR_IR) & 1) != msr_ir ||
         ((value >> MSR_DR) & 1) != msr_dr) {
-        /* Flush all tlb when changing translation mode */
-        tlb_flush(cs, 1);
-        excp = POWERPC_EXCP_NONE;
         cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
     }
+    if ((env->mmu_model & POWERPC_MMU_BOOKE) &&
+        ((value >> MSR_GS) & 1) != msr_gs) {
+        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+    }
     if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
target-ppc/machine.c
@@ -97,9 +97,12 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
     qemu_get_betls(f, &env->nip);
     qemu_get_betls(f, &env->hflags);
     qemu_get_betls(f, &env->hflags_nmsr);
-    qemu_get_sbe32s(f, &env->mmu_idx);
+    qemu_get_sbe32(f); /* Discard unused mmu_idx */
     qemu_get_sbe32(f); /* Discard unused power_mode */

+    /* Recompute mmu indices */
+    hreg_compute_mem_idx(env);
+
     return 0;
 }

target-ppc/translate.c
@@ -11220,8 +11220,9 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                 env->nip, env->lr, env->ctr, cpu_read_xer(env),
                 cs->cpu_index);
     cpu_fprintf(f, "MSR " TARGET_FMT_lx " HID0 " TARGET_FMT_lx " HF "
-                TARGET_FMT_lx " idx %d\n", env->msr, env->spr[SPR_HID0],
-                env->hflags, env->mmu_idx);
+                TARGET_FMT_lx " iidx %d didx %d\n",
+                env->msr, env->spr[SPR_HID0],
+                env->hflags, env->immu_idx, env->dmmu_idx);
 #if !defined(NO_TIMER_DUMP)
     cpu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64
 #if !defined(CONFIG_USER_ONLY)
@@ -11428,7 +11429,7 @@ void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb)
     ctx.spr_cb = env->spr_cb;
     ctx.pr = msr_pr;
     ctx.hv = !msr_pr && msr_hv;
-    ctx.mem_idx = env->mmu_idx;
+    ctx.mem_idx = env->dmmu_idx;
     ctx.insns_flags = env->insns_flags;
     ctx.insns_flags2 = env->insns_flags2;
     ctx.access_type = -1;