Mirror of https://github.com/qemu/qemu.git
Replace the is_user variable with mmu_idx in the softmmu core, allowing support of more than 2 MMU access modes.
Add a backward-compatibility is_user variable in target code where needed.
Implement a per-target cpu_mmu_index function, avoiding duplicated code and #ifdef TARGET_xxx in the softmmu core functions.
Implement per-target MMU mode definitions. As an example, add a PowerPC hypervisor mode definition and Alpha executive and kernel mode definitions.
Optimize the PowerPC case by precomputing mmu_idx whenever the MSR register changes, and use the same definition in the code translation path.

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@3384 c046a42c-6fe2-441c-8c8c-71466251a162
commit 6ebbf39000
parent d0f48074db
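The heart of the change is visible in the per-target cpu.h hunks below: each target declares how many MMU modes it has, names them, and computes the current index itself, so the softmmu core can index tlb_table[mmu_idx] without any #ifdef TARGET_xxx. The following is a minimal sketch of that pattern, modeled on the target-i386/cpu.h hunk in this diff; treat it as illustrative rather than a drop-in header.

```c
/* Per-target MMU mode declarations (sketch based on the target-i386/cpu.h
 * hunk below).  softmmu_exec.h expands one set of ld/st accessors per
 * MMU_MODEn_SUFFIX, and the softmmu templates pick the TLB row returned
 * by cpu_mmu_index(). */
#define NB_MMU_MODES     2           /* kernel and user */

#define MMU_MODE0_SUFFIX _kernel     /* generates ldl_kernel(), stl_kernel(), ... */
#define MMU_MODE1_SUFFIX _user       /* generates ldl_user(), stl_user(), ... */
#define MMU_USER_IDX     1           /* index handle_cpu_signal() passes for user faults */

/* Map the current privilege level to a TLB index; on x86, CPL 3 is user mode. */
static inline int cpu_mmu_index(CPUState *env)
{
    return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
}
```

Targets with more modes simply raise NB_MMU_MODES and add suffixes, as the Alpha (4 modes) and PowerPC hypervisor (3 modes) hunks do; softmmu_exec.h rejects anything above 4 modes for now.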
@@ -112,15 +112,6 @@ typedef struct CPUTLBEntry {
target_phys_addr_t addend;
} CPUTLBEntry;

/* Alpha has 4 different running levels */
#if defined(TARGET_ALPHA)
#define NB_MMU_MODES 4
#elif defined(TARGET_PPC64H) /* PowerPC 64 with hypervisor mode support */
#define NB_MMU_MODES 3
#else
#define NB_MMU_MODES 2
#endif

#define CPU_COMMON \
struct TranslationBlock *current_tb; /* currently executing TB */ \
/* soft mmu support */ \
cpu-exec.c (19 changed lines)

@@ -884,8 +884,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
}

/* see if it is an MMU fault */
ret = cpu_x86_handle_mmu_fault(env, address, is_write,
((env->hflags & HF_CPL_MASK) == 3), 0);
ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
@@ -934,7 +933,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
return 1;
}
/* see if it is an MMU fault */
ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
@@ -970,7 +969,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
return 1;
}
/* see if it is an MMU fault */
ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
@@ -1007,7 +1006,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
}

/* see if it is an MMU fault */
ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
@@ -1056,7 +1055,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
return 1;
}
/* see if it is an MMU fault */
ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
@@ -1096,7 +1095,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
}

/* see if it is an MMU fault */
ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
@@ -1146,7 +1145,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
}

/* see if it is an MMU fault */
ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
@@ -1191,7 +1190,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
}

/* see if it is an MMU fault */
ret = cpu_alpha_handle_mmu_fault(env, address, is_write, 1, 0);
ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
@@ -1235,7 +1234,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
}

/* see if it is an MMU fault */
ret = cpu_cris_handle_mmu_fault(env, address, is_write, 1, 0);
ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
exec-all.h (40 changed lines)

@@ -117,14 +117,14 @@ void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
target_phys_addr_t paddr, int prot,
int is_user, int is_softmmu);
int mmu_idx, int is_softmmu);
static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
target_phys_addr_t paddr, int prot,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
if (prot & PAGE_READ)
prot |= PAGE_EXEC;
return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
return tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
}

#define CODE_GEN_MAX_SIZE 65536
@@ -562,10 +562,10 @@ extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int is_user,
void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
void *retaddr);

#define ACCESS_TYPE 3
#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
#define env cpu_single_env

@@ -598,35 +598,15 @@ static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
is the offset relative to phys_ram_base */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
int is_user, index, pd;
int mmu_idx, index, pd;

index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
is_user = msr_pr;
#elif defined (TARGET_MIPS)
is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
#elif defined (TARGET_SPARC)
is_user = (env->psrs == 0);
#elif defined (TARGET_ARM)
is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
#elif defined (TARGET_SH4)
is_user = ((env->sr & SR_MD) == 0);
#elif defined (TARGET_ALPHA)
is_user = ((env->ps >> 3) & 3);
#elif defined (TARGET_M68K)
is_user = ((env->sr & SR_S) == 0);
#elif defined (TARGET_CRIS)
is_user = (0);
#else
#error unimplemented CPU
#endif
if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
mmu_idx = cpu_mmu_index(env);
if (__builtin_expect(env->tlb_table[mmu_idx][index].addr_code !=
(addr & TARGET_PAGE_MASK), 0)) {
ldub_code(addr);
}
pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
pd = env->tlb_table[mmu_idx][index].addr_code & ~TARGET_PAGE_MASK;
if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#ifdef TARGET_SPARC
do_unassigned_access(addr, 0, 1, 0);
@@ -634,7 +614,7 @@ static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
}
return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base;
return addr + env->tlb_table[mmu_idx][index].addend - (unsigned long)phys_ram_base;
}
#endif
exec.c (10 changed lines)

@@ -1608,7 +1608,7 @@ static inline void tlb_set_dirty(CPUState *env,
conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
target_phys_addr_t paddr, int prot,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
PhysPageDesc *p;
unsigned long pd;
@@ -1626,8 +1626,8 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
pd = p->phys_offset;
}
#if defined(DEBUG_TLB)
printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

ret = 0;
@@ -1664,7 +1664,7 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,

index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
addend -= vaddr;
te = &env->tlb_table[is_user][index];
te = &env->tlb_table[mmu_idx][index];
te->addend = addend;
if (prot & PAGE_READ) {
te->addr_read = address;
@@ -1790,7 +1790,7 @@ void tlb_flush_page(CPUState *env, target_ulong addr)

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
target_phys_addr_t paddr, int prot,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
return 0;
}
@@ -811,12 +811,14 @@ static int get_page_bits (CPUState *env)

static int get_pte (uint64_t *pfnp, int *zbitsp, int *protp,
uint64_t ptebase, int page_bits, uint64_t level,
int is_user, int rw)
int mmu_idx, int rw)
{
uint64_t pteaddr, pte, pfn;
uint8_t gh;
int ure, uwe, kre, kwe, foE, foR, foW, v, ret, ar;
int ure, uwe, kre, kwe, foE, foR, foW, v, ret, ar, is_user;

/* XXX: TOFIX */
is_user = mmu_idx == MMU_USER_IDX;
pteaddr = (ptebase << page_bits) + (8 * level);
pte = ldq_raw(pteaddr);
/* Decode all interresting PTE fields */
@@ -871,7 +873,7 @@ static int get_pte (uint64_t *pfnp, int *zbitsp, int *protp,

static int paddr_from_pte (uint64_t *paddr, int *zbitsp, int *prot,
uint64_t ptebase, int page_bits,
uint64_t vaddr, int is_user, int rw)
uint64_t vaddr, int mmu_idx, int rw)
{
uint64_t pfn, page_mask, lvl_mask, level1, level2, level3;
int lvl_bits, ret;
@@ -909,7 +911,7 @@ static int paddr_from_pte (uint64_t *paddr, int *zbitsp, int *prot,
break;
}
/* Level 3 PTE */
ret = get_pte(&pfn, zbitsp, prot, pfn, page_bits, level3, is_user, rw);
ret = get_pte(&pfn, zbitsp, prot, pfn, page_bits, level3, mmu_idx, rw);
if (ret & 0x1) {
/* Translation not valid */
ret = 1;
@@ -943,7 +945,7 @@ static int paddr_from_pte (uint64_t *paddr, int *zbitsp, int *prot,

static int virtual_to_physical (CPUState *env, uint64_t *physp,
int *zbitsp, int *protp,
uint64_t virtual, int is_user, int rw)
uint64_t virtual, int mmu_idx, int rw)
{
uint64_t sva, ptebase;
int seg, page_bits, ret;
@@ -961,16 +963,16 @@ static int virtual_to_physical (CPUState *env, uint64_t *physp,
case 0:
/* seg1: 3 levels of PTE */
ret = paddr_from_pte(physp, zbitsp, protp, ptebase, page_bits,
virtual, is_user, rw);
virtual, mmu_idx, rw);
break;
case 1:
/* seg1: 2 levels of PTE */
ret = paddr_from_pte(physp, zbitsp, protp, ptebase, page_bits,
virtual, is_user, rw);
virtual, mmu_idx, rw);
break;
case 2:
/* kernel segment */
if (is_user) {
if (mmu_idx != 0) {
ret = 2;
} else {
*physp = virtual;
@@ -979,7 +981,7 @@ static int virtual_to_physical (CPUState *env, uint64_t *physp,
case 3:
/* seg1: TB mapped */
ret = paddr_from_pte(physp, zbitsp, protp, ptebase, page_bits,
virtual, is_user, rw);
virtual, mmu_idx, rw);
break;
default:
ret = 1;
@@ -991,7 +993,7 @@ static int virtual_to_physical (CPUState *env, uint64_t *physp,

/* XXX: code provision */
int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
uint64_t physical, page_size, end;
int prot, zbits, ret;
@@ -1000,7 +1002,7 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
ret = 2;
} else {
ret = virtual_to_physical(env, &physical, &zbits, &prot,
address, is_user, rw);
address, mmu_idx, rw);
}
switch (ret) {
case 0:
@@ -1009,7 +1011,7 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
address &= ~(page_size - 1);
for (end = physical + page_size; physical < end; physical += 0x1000) {
ret = tlb_set_page(env, address, physical, prot,
is_user, is_softmmu);
mmu_idx, is_softmmu);
address += 0x1000;
}
break;
@@ -1,10 +1,16 @@
/* Common softmmu definitions and inline routines. */

#define ldul_user ldl_user
#define ldul_kernel ldl_kernel
/* XXX: find something cleaner.
* Furthermore, this is false for 64 bits targets
*/
#define ldul_user ldl_user
#define ldul_kernel ldl_kernel
#define ldul_hypv ldl_hypv
#define ldul_executive ldl_executive
#define ldul_supervisor ldl_supervisor

#define ACCESS_TYPE 0
#define MEMSUFFIX _kernel
#define MEMSUFFIX MMU_MODE0_SUFFIX
#define DATA_SIZE 1
#include "softmmu_header.h"

@@ -20,7 +26,7 @@
#undef MEMSUFFIX

#define ACCESS_TYPE 1
#define MEMSUFFIX _user
#define MEMSUFFIX MMU_MODE1_SUFFIX
#define DATA_SIZE 1
#include "softmmu_header.h"

@@ -35,8 +41,50 @@
#undef ACCESS_TYPE
#undef MEMSUFFIX

/* these access are slower, they must be as rare as possible */
#if (NB_MMU_MODES >= 3)

#define ACCESS_TYPE 2
#define MEMSUFFIX MMU_MODE2_SUFFIX
#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"
#undef ACCESS_TYPE
#undef MEMSUFFIX

#if (NB_MMU_MODES >= 4)

#define ACCESS_TYPE 3
#define MEMSUFFIX MMU_MODE3_SUFFIX
#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"
#undef ACCESS_TYPE
#undef MEMSUFFIX

#if (NB_MMU_MODES > 4)
#error "NB_MMU_MODES > 4 is not supported for now"
#endif /* (NB_MMU_MODES > 4) */
#endif /* (NB_MMU_MODES == 4) */
#endif /* (NB_MMU_MODES >= 3) */

/* these access are slower, they must be as rare as possible */
#define ACCESS_TYPE (NB_MMU_MODES)
#define MEMSUFFIX _data
#define DATA_SIZE 1
#include "softmmu_header.h"
softmmu_header.h (119 changed lines)

@@ -39,66 +39,19 @@
#error unsupported data size
#endif

#if ACCESS_TYPE == 0
#if ACCESS_TYPE < (NB_MMU_MODES)

#define CPU_MEM_INDEX 0
#define CPU_MMU_INDEX ACCESS_TYPE
#define MMUSUFFIX _mmu

#elif ACCESS_TYPE == 1
#elif ACCESS_TYPE == (NB_MMU_MODES)

#define CPU_MEM_INDEX 1
#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MMUSUFFIX _mmu

#elif ACCESS_TYPE == 2
#elif ACCESS_TYPE == (NB_MMU_MODES + 1)

#ifdef TARGET_I386
#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3)
#elif defined (TARGET_PPC)
#define CPU_MEM_INDEX (msr_pr)
#elif defined (TARGET_MIPS)
#define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM)
#elif defined (TARGET_SPARC)
#define CPU_MEM_INDEX ((env->psrs) == 0)
#elif defined (TARGET_ARM)
#define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
#elif defined (TARGET_SH4)
#define CPU_MEM_INDEX ((env->sr & SR_MD) == 0)
#elif defined (TARGET_ALPHA)
#define CPU_MEM_INDEX ((env->ps >> 3) & 3)
#elif defined (TARGET_M68K)
#define CPU_MEM_INDEX ((env->sr & SR_S) == 0)
#elif defined (TARGET_CRIS)
/* CRIS FIXME: I guess we want to validate supervisor mode acceses here. */
#define CPU_MEM_INDEX (0)
#else
#error unsupported CPU
#endif
#define MMUSUFFIX _mmu

#elif ACCESS_TYPE == 3

#ifdef TARGET_I386
#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3)
#elif defined (TARGET_PPC)
#define CPU_MEM_INDEX (msr_pr)
#elif defined (TARGET_MIPS)
#define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM)
#elif defined (TARGET_SPARC)
#define CPU_MEM_INDEX ((env->psrs) == 0)
#elif defined (TARGET_ARM)
#define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
#elif defined (TARGET_SH4)
#define CPU_MEM_INDEX ((env->sr & SR_MD) == 0)
#elif defined (TARGET_ALPHA)
#define CPU_MEM_INDEX ((env->ps >> 3) & 3)
#elif defined (TARGET_M68K)
#define CPU_MEM_INDEX ((env->sr & SR_S) == 0)
#elif defined (TARGET_CRIS)
/* CRIS FIXME: I guess we want to validate supervisor mode acceses here. */
#define CPU_MEM_INDEX (0)
#else
#error unsupported CPU
#endif
#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MMUSUFFIX _cmmu

#else
@@ -111,18 +64,18 @@
#define RES_TYPE int
#endif

#if ACCESS_TYPE == 3
#if ACCESS_TYPE == (NB_MMU_MODES + 1)
#define ADDR_READ addr_code
#else
#define ADDR_READ addr_read
#endif

DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
int is_user);
void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE v, int is_user);
int mmu_idx);
void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE v, int mmu_idx);

#if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \
(ACCESS_TYPE <= 1) && defined(ASM_SOFTMMU)
(ACCESS_TYPE < NB_MMU_MODES) && defined(ASM_SOFTMMU)

#define CPU_TLB_ENTRY_BITS 4

@@ -161,8 +114,8 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
"i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
"i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
"i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
"m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_read)),
"i" (CPU_MEM_INDEX),
"m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
"i" (CPU_MMU_INDEX),
"m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
: "%eax", "%ecx", "%edx", "memory", "cc");
return res;
@@ -208,8 +161,8 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
"i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
"i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
"i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
"m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_read)),
"i" (CPU_MEM_INDEX),
"m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
"i" (CPU_MMU_INDEX),
"m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
: "%eax", "%ecx", "%edx", "memory", "cc");
return res;
@@ -260,8 +213,8 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE
"i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
"i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
"i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
"m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_write)),
"i" (CPU_MEM_INDEX),
"m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_write)),
"i" (CPU_MMU_INDEX),
"m" (*(uint8_t *)&glue(glue(__st, SUFFIX), MMUSUFFIX))
: "%eax", "%ecx", "%edx", "memory", "cc");
}
@@ -276,16 +229,16 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
RES_TYPE res;
target_ulong addr;
unsigned long physaddr;
int is_user;
int mmu_idx;

addr = ptr;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
is_user = CPU_MEM_INDEX;
if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ !=
mmu_idx = CPU_MMU_INDEX;
if (__builtin_expect(env->tlb_table[mmu_idx][index].ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
} else {
physaddr = addr + env->tlb_table[is_user][index].addend;
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
}
return res;
@@ -297,23 +250,23 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
int res, index;
target_ulong addr;
unsigned long physaddr;
int is_user;
int mmu_idx;

addr = ptr;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
is_user = CPU_MEM_INDEX;
if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ !=
mmu_idx = CPU_MMU_INDEX;
if (__builtin_expect(env->tlb_table[mmu_idx][index].ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
} else {
physaddr = addr + env->tlb_table[is_user][index].addend;
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr);
}
return res;
}
#endif

#if ACCESS_TYPE != 3
#if ACCESS_TYPE != (NB_MMU_MODES + 1)

/* generic store macro */

@@ -322,25 +275,25 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE
int index;
target_ulong addr;
unsigned long physaddr;
int is_user;
int mmu_idx;

addr = ptr;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
is_user = CPU_MEM_INDEX;
if (__builtin_expect(env->tlb_table[is_user][index].addr_write !=
mmu_idx = CPU_MMU_INDEX;
if (__builtin_expect(env->tlb_table[mmu_idx][index].addr_write !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, is_user);
glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, mmu_idx);
} else {
physaddr = addr + env->tlb_table[is_user][index].addend;
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v);
}
}

#endif /* ACCESS_TYPE != 3 */
#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */

#endif /* !asm */

#if ACCESS_TYPE != 3
#if ACCESS_TYPE != (NB_MMU_MODES + 1)

#if DATA_SIZE == 8
static inline float64 glue(ldfq, MEMSUFFIX)(target_ulong ptr)
@@ -386,7 +339,7 @@ static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
}
#endif /* DATA_SIZE == 4 */

#endif /* ACCESS_TYPE != 3 */
#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */

#undef RES_TYPE
#undef DATA_TYPE
@@ -394,6 +347,6 @@ static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef CPU_MEM_INDEX
#undef CPU_MMU_INDEX
#undef MMUSUFFIX
#undef ADDR_READ
@@ -48,7 +48,7 @@
#endif

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
int is_user,
int mmu_idx,
void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
target_ulong tlb_addr)
@@ -76,7 +76,7 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,

/* handle all cases except unaligned access which span two pages */
DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
int is_user)
int mmu_idx)
{
DATA_TYPE res;
int index;
@@ -88,9 +88,9 @@ DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
/* XXX: could done more in memory macro in a non portable way */
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_table[is_user][index].ADDR_READ;
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_table[is_user][index].addend;
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
@@ -101,16 +101,16 @@ DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
do_unaligned_access:
retaddr = GETPC();
#ifdef ALIGNED_ONLY
do_unaligned_access(addr, READ_ACCESS_TYPE, is_user, retaddr);
do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
is_user, retaddr);
mmu_idx, retaddr);
} else {
/* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
retaddr = GETPC();
do_unaligned_access(addr, READ_ACCESS_TYPE, is_user, retaddr);
do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
}
#endif
res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
@@ -120,9 +120,9 @@ DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
retaddr = GETPC();
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0)
do_unaligned_access(addr, READ_ACCESS_TYPE, is_user, retaddr);
do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
tlb_fill(addr, READ_ACCESS_TYPE, is_user, retaddr);
tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
goto redo;
}
return res;
@@ -130,7 +130,7 @@ DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
int is_user,
int mmu_idx,
void *retaddr)
{
DATA_TYPE res, res1, res2;
@@ -140,9 +140,9 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,

index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_table[is_user][index].ADDR_READ;
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_table[is_user][index].addend;
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
@@ -154,9 +154,9 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
is_user, retaddr);
mmu_idx, retaddr);
res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
is_user, retaddr);
mmu_idx, retaddr);
shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
@@ -170,7 +170,7 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
}
} else {
/* the page is not in the TLB : fill it */
tlb_fill(addr, READ_ACCESS_TYPE, is_user, retaddr);
tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
goto redo;
}
return res;
@@ -180,7 +180,7 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
DATA_TYPE val,
int is_user,
int mmu_idx,
void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
@@ -211,7 +211,7 @@ static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,

void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
DATA_TYPE val,
int is_user)
int mmu_idx)
{
target_phys_addr_t physaddr;
target_ulong tlb_addr;
@@ -220,9 +220,9 @@ void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,

index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_table[is_user][index].addr_write;
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_table[is_user][index].addend;
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
@@ -233,16 +233,16 @@ void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
do_unaligned_access:
retaddr = GETPC();
#ifdef ALIGNED_ONLY
do_unaligned_access(addr, 1, is_user, retaddr);
do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
is_user, retaddr);
mmu_idx, retaddr);
} else {
/* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
retaddr = GETPC();
do_unaligned_access(addr, 1, is_user, retaddr);
do_unaligned_access(addr, 1, mmu_idx, retaddr);
}
#endif
glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
@@ -252,9 +252,9 @@ void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
retaddr = GETPC();
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0)
do_unaligned_access(addr, 1, is_user, retaddr);
do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
tlb_fill(addr, 1, is_user, retaddr);
tlb_fill(addr, 1, mmu_idx, retaddr);
goto redo;
}
}
@@ -262,7 +262,7 @@ void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
DATA_TYPE val,
int is_user,
int mmu_idx,
void *retaddr)
{
target_phys_addr_t physaddr;
@@ -271,9 +271,9 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,

index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_table[is_user][index].addr_write;
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_table[is_user][index].addend;
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
@@ -285,10 +285,10 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
for(i = 0;i < DATA_SIZE; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
is_user, retaddr);
mmu_idx, retaddr);
#else
glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
is_user, retaddr);
mmu_idx, retaddr);
#endif
}
} else {
@@ -297,7 +297,7 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
}
} else {
/* the page is not in the TLB : fill it */
tlb_fill(addr, 1, is_user, retaddr);
tlb_fill(addr, 1, mmu_idx, retaddr);
goto redo;
}
}
@@ -256,6 +256,8 @@ struct pal_handler_t {
void (*call_pal)(CPUAlphaState *env, uint32_t palcode);
};

#define NB_MMU_MODES 4

struct CPUAlphaState {
uint64_t ir[31];
float64 fir[31];
@@ -302,6 +304,17 @@ struct CPUAlphaState {
#define cpu_gen_code cpu_alpha_gen_code
#define cpu_signal_handler cpu_alpha_signal_handler

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _executive
#define MMU_MODE2_SUFFIX _supervisor
#define MMU_MODE3_SUFFIX _user
#define MMU_USER_IDX 3
static inline int cpu_mmu_index (CPUState *env)
{
return (env->ps >> 3) & 3;
}

#include "cpu-all.h"

enum {
@@ -73,7 +73,7 @@ static inline void regs_to_env(void)
}

int cpu_alpha_handle_mmu_fault (CPUState *env, uint64_t address, int rw,
int is_user, int is_softmmu);
int mmu_idx, int is_softmmu);
int cpu_alpha_mfpr (CPUState *env, int iprn, uint64_t *valp);
int cpu_alpha_mtpr (CPUState *env, int iprn, uint64_t val, uint64_t *oldvalp);
@@ -28,7 +28,7 @@
#if defined(CONFIG_USER_ONLY)

int cpu_alpha_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
if (rw == 2)
env->exception_index = EXCP_ITB_MISS;
@@ -57,7 +57,7 @@ target_phys_addr_t cpu_get_phys_page_debug (CPUState *env, target_ulong addr)
}

int cpu_alpha_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
uint32_t opc;
|
@ -1164,20 +1164,20 @@ void helper_mtpr (int iprn)
|
||||
void helper_ld_phys_to_virt (void)
|
||||
{
|
||||
uint64_t tlb_addr, physaddr;
|
||||
int index, is_user;
|
||||
int index, mmu_idx;
|
||||
void *retaddr;
|
||||
|
||||
is_user = (env->ps >> 3) & 3;
|
||||
mmu_idx = cpu_mmu_index(env);
|
||||
index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
|
||||
redo:
|
||||
tlb_addr = env->tlb_table[is_user][index].addr_read;
|
||||
tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
|
||||
if ((T0 & TARGET_PAGE_MASK) ==
|
||||
(tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
|
||||
physaddr = T0 + env->tlb_table[is_user][index].addend;
|
||||
physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
|
||||
} else {
|
||||
/* the page is not in the TLB : fill it */
|
||||
retaddr = GETPC();
|
||||
tlb_fill(T0, 0, is_user, retaddr);
|
||||
tlb_fill(T0, 0, mmu_idx, retaddr);
|
||||
goto redo;
|
||||
}
|
||||
T0 = physaddr;
|
||||
@ -1186,20 +1186,20 @@ void helper_ld_phys_to_virt (void)
|
||||
void helper_st_phys_to_virt (void)
|
||||
{
|
||||
uint64_t tlb_addr, physaddr;
|
||||
int index, is_user;
|
||||
int index, mmu_idx;
|
||||
void *retaddr;
|
||||
|
||||
is_user = (env->ps >> 3) & 3;
|
||||
mmu_idx = cpu_mmu_index(env);
|
||||
index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
|
||||
redo:
|
||||
tlb_addr = env->tlb_table[is_user][index].addr_write;
|
||||
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
|
||||
if ((T0 & TARGET_PAGE_MASK) ==
|
||||
(tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
|
||||
physaddr = T0 + env->tlb_table[is_user][index].addend;
|
||||
physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
|
||||
} else {
|
||||
/* the page is not in the TLB : fill it */
|
||||
retaddr = GETPC();
|
||||
tlb_fill(T0, 1, is_user, retaddr);
|
||||
tlb_fill(T0, 1, mmu_idx, retaddr);
|
||||
goto redo;
|
||||
}
|
||||
T0 = physaddr;
|
||||
@ -1223,7 +1223,7 @@ void helper_st_phys_to_virt (void)
|
||||
NULL, it means that the function was called in C code (i.e. not
|
||||
from generated code or from helper.c) */
|
||||
/* XXX: fix it to restore all registers */
|
||||
void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
|
||||
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
|
||||
{
|
||||
TranslationBlock *tb;
|
||||
CPUState *saved_env;
|
||||
@ -1234,7 +1234,7 @@ void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
|
||||
generated code */
|
||||
saved_env = env;
|
||||
env = cpu_single_env;
|
||||
ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, is_user, 1);
|
||||
ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
|
||||
if (!likely(ret == 0)) {
|
||||
if (likely(retaddr)) {
|
||||
/* now we have a real cpu fault */
|
||||
|
@@ -43,6 +43,8 @@ typedef void ARMWriteCPFunc(void *opaque, int cp_info,
typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info,
int dstreg, int operand);

#define NB_MMU_MODES 2

/* We currently assume float and double are IEEE single and double
precision respectively.
Doing runtime conversions is tricky because VFP registers may contain
@@ -301,6 +303,15 @@ void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUState *env)
{
return (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR ? 1 : 0;
}

#include "cpu-all.h"

#endif
@@ -46,7 +46,7 @@ static inline void regs_to_env(void)
}

int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu);
int mmu_idx, int is_softmmu);

static inline int cpu_halted(CPUState *env) {
if (!env->halted)
@@ -169,7 +169,7 @@ void do_interrupt (CPUState *env)
}

int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
if (rw == 2) {
env->exception_index = EXCP_PREFETCH_ABORT;
@@ -547,18 +547,19 @@ do_fault:
}

int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
int access_type, int is_user, int is_softmmu)
int access_type, int mmu_idx, int is_softmmu)
{
uint32_t phys_addr;
int prot;
int ret;
int ret, is_user;

is_user = mmu_idx == MMU_USER_IDX;
ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
if (ret == 0) {
/* Map a single [sub]page. */
phys_addr &= ~(uint32_t)0x3ff;
address &= ~(uint32_t)0x3ff;
return tlb_set_page (env, address, phys_addr, prot, is_user,
return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
is_softmmu);
}
@@ -196,7 +196,7 @@ void do_vfp_get_fpscr(void)
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
TranslationBlock *tb;
CPUState *saved_env;
@@ -207,7 +207,7 @@ void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
generated code */
saved_env = env;
env = cpu_single_env;
ret = cpu_arm_handle_mmu_fault(env, addr, is_write, is_user, 1);
ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
if (__builtin_expect(ret, 0)) {
if (retaddr) {
/* now we have a real cpu fault */
@@ -74,6 +74,8 @@
/* Internal flags for the implementation. */
#define F_DELAYSLOT 1

#define NB_MMU_MODES 2

typedef struct CPUCRISState {
uint32_t debug1;
uint32_t debug2;
@@ -229,6 +231,16 @@ void register_cris_insns (CPUCRISState *env);
#define cpu_gen_code cpu_cris_gen_code
#define cpu_signal_handler cpu_cris_signal_handler

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
/* CRIS FIXME: I guess we want to validate supervisor mode acceses here. */
static inline int cpu_mmu_index (CPUState *env)
{
return 0;
}

#include "cpu-all.h"

/* Register aliases. */
@@ -45,8 +45,8 @@ static inline void regs_to_env(void)
}

int cpu_cris_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu);
void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr);
int mmu_idx, int is_softmmu);
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr);

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
@@ -35,7 +35,7 @@ void do_interrupt (CPUState *env)
}

int cpu_cris_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
env->exception_index = 0xaa;
env->debug1 = address;
@@ -52,7 +52,7 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr)
#else /* !CONFIG_USER_ONLY */

int cpu_cris_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
struct cris_mmu_result_t res;
int prot, miss;
@@ -61,7 +61,7 @@ int cpu_cris_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
address &= TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
// printf ("%s pc=%x %x w=%d smmu=%d\n", __func__, env->pc, address, rw, is_softmmu);
miss = cris_mmu_translate(&res, env, address, rw, is_user);
miss = cris_mmu_translate(&res, env, address, rw, mmu_idx);
if (miss)
{
/* handle the miss. */
@@ -73,7 +73,7 @@ int cpu_cris_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
phy = res.phy;
}
// printf ("a=%x phy=%x\n", address, phy);
return tlb_set_page(env, address, phy, prot, is_user, is_softmmu);
return tlb_set_page(env, address, phy, prot, mmu_idx, is_softmmu);
}
@@ -111,11 +111,12 @@ static int cris_mmu_translate_page(struct cris_mmu_result_t *res,

int cris_mmu_translate(struct cris_mmu_result_t *res,
CPUState *env, uint32_t vaddr,
int rw, int is_user)
int rw, int mmu_idx)
{
uint32_t phy = vaddr;
int seg;
int miss = 0;
int is_user = mmu_idx == MMU_USER_IDX;

if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) {
res->phy = vaddr;
@@ -17,4 +17,4 @@ struct cris_mmu_result_t

int cris_mmu_translate(struct cris_mmu_result_t *res,
CPUState *env, uint32_t vaddr,
int rw, int is_user);
int rw, int mmu_idx);
@@ -41,7 +41,7 @@
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
TranslationBlock *tb;
CPUState *saved_env;
@@ -52,7 +52,7 @@ void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
generated code */
saved_env = env;
env = cpu_single_env;
ret = cpu_cris_handle_mmu_fault(env, addr, is_write, is_user, 1);
ret = cpu_cris_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
if (__builtin_expect(ret, 0)) {
if (retaddr) {
/* now we have a real cpu fault */
@@ -432,6 +432,8 @@ typedef union {
#define CPU_NB_REGS 8
#endif

#define NB_MMU_MODES 2

typedef struct CPUX86State {
#if TARGET_LONG_BITS > HOST_LONG_BITS
/* temporaries if we cannot store them in host registers */
@@ -688,6 +690,15 @@ static inline int cpu_get_time_fast(void)
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUState *env)
{
return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
}

#include "cpu-all.h"

#include "svm.h"
@@ -163,8 +163,8 @@ void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr);
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int is_write, int is_user, int is_softmmu);
void tlb_fill(target_ulong addr, int is_write, int is_user,
int is_write, int mmu_idx, int is_softmmu);
void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
void *retaddr);
void __hidden cpu_lock(void);
void __hidden cpu_unlock(void);
@@ -3885,7 +3885,7 @@ void update_fp_status(void)
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
TranslationBlock *tb;
int ret;
@@ -3897,7 +3897,7 @@ void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
saved_env = env;
env = cpu_single_env;

ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
if (ret) {
if (retaddr) {
/* now we have a real cpu fault */
@@ -571,7 +571,7 @@ void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int is_write, int is_user, int is_softmmu)
int is_write, int mmu_idx, int is_softmmu)
{
/* user mode only emulation */
is_write &= 1;
@@ -598,14 +598,15 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int is_write1, int is_user, int is_softmmu)
int is_write1, int mmu_idx, int is_softmmu)
{
uint64_t ptep, pte;
uint32_t pdpe_addr, pde_addr, pte_addr;
int error_code, is_dirty, prot, page_size, ret, is_write;
int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
unsigned long paddr, page_offset;
target_ulong vaddr, virt_addr;

is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
addr, is_write1, is_user, env->eip);
@@ -862,7 +863,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
paddr = (pte & TARGET_PAGE_MASK) + page_offset;
vaddr = virt_addr + page_offset;

ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
return ret;
do_fault_protect:
error_code = PG_ERROR_P_MASK;
@@ -53,6 +53,8 @@
#define EXCP_RTE 0x100
#define EXCP_HALT_INSN 0x101

#define NB_MMU_MODES 2

typedef struct CPUM68KState {
uint32_t dregs[8];
uint32_t aregs[8];
@@ -223,6 +225,15 @@ void register_m68k_insns (CPUM68KState *env);
#define cpu_gen_code cpu_m68k_gen_code
#define cpu_signal_handler cpu_m68k_signal_handler

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUState *env)
{
return (env->sr & SR_S) == 0 ? 1 : 0;
}

#include "cpu-all.h"

#endif
@@ -38,7 +38,7 @@ static inline void regs_to_env(void)
}

int cpu_m68k_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu);
int mmu_idx, int is_softmmu);

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
@@ -301,7 +301,7 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
#if defined(CONFIG_USER_ONLY)

int cpu_m68k_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
env->exception_index = EXCP_ACCESS;
env->mmu.ar = address;
@@ -311,13 +311,13 @@ int cpu_m68k_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
#else

int cpu_m68k_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
int prot;

address &= TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE;
return tlb_set_page(env, address, address, prot, is_user, is_softmmu);
return tlb_set_page(env, address, address, prot, mmu_idx, is_softmmu);
}

/* Notify CPU of a pending interrupt. Prioritization and vectoring should
@@ -49,7 +49,7 @@ extern int semihosting_enabled;
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
TranslationBlock *tb;
CPUState *saved_env;
@@ -60,7 +60,7 @@ void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
generated code */
saved_env = env;
env = cpu_single_env;
ret = cpu_m68k_handle_mmu_fault(env, addr, is_write, is_user, 1);
ret = cpu_m68k_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
if (__builtin_expect(ret, 0)) {
if (retaddr) {
/* now we have a real cpu fault */
@@ -107,6 +107,8 @@ struct CPUMIPSFPUContext {
#define FP_UNIMPLEMENTED 32
};

#define NB_MMU_MODES 2

typedef struct CPUMIPSMVPContext CPUMIPSMVPContext;
struct CPUMIPSMVPContext {
int32_t CP0_MVPControl;
@@ -484,6 +486,15 @@ int cpu_mips_register (CPUMIPSState *env, mips_def_t *def);
#define cpu_signal_handler cpu_mips_signal_handler
#define cpu_list mips_cpu_list

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUState *env)
{
return (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM ? 1 : 0;
}

#include "cpu-all.h"

/* Memory access type :
@@ -105,7 +105,7 @@ void do_pmon (int function);
void dump_sc (void);

int cpu_mips_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu);
int mmu_idx, int is_softmmu);
void do_interrupt (CPUState *env);
void r4k_invalidate_tlb (CPUState *env, int idx, int use_extra);
@@ -229,7 +229,7 @@ void cpu_mips_init_mmu (CPUState *env)
#endif /* !defined(CONFIG_USER_ONLY) */

int cpu_mips_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
target_ulong physical;
int prot;
@@ -241,8 +241,8 @@ int cpu_mips_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
#if 0
cpu_dump_state(env, logfile, fprintf, 0);
#endif
fprintf(logfile, "%s pc " TARGET_FMT_lx " ad " TARGET_FMT_lx " rw %d is_user %d smmu %d\n",
__func__, env->PC[env->current_tc], address, rw, is_user, is_softmmu);
fprintf(logfile, "%s pc " TARGET_FMT_lx " ad " TARGET_FMT_lx " rw %d mmu_idx %d smmu %d\n",
__func__, env->PC[env->current_tc], address, rw, mmu_idx, is_softmmu);
}

rw &= 1;
@@ -265,7 +265,7 @@ int cpu_mips_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
if (ret == TLBRET_MATCH) {
ret = tlb_set_page(env, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot,
is_user, is_softmmu);
mmu_idx, is_softmmu);
} else if (ret < 0) {
do_fault:
switch (ret) {
@@ -563,7 +563,7 @@ static void do_unaligned_access (target_ulong addr, int is_write, int is_user, v
do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}

void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
TranslationBlock *tb;
CPUState *saved_env;
@@ -574,7 +574,7 @@ void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
generated code */
saved_env = env;
env = cpu_single_env;
ret = cpu_mips_handle_mmu_fault(env, addr, is_write, is_user, 1);
ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
if (ret) {
if (retaddr) {
/* now we have a real cpu fault */
@ -434,6 +434,12 @@ enum {
POWERPC_FLAG_PMM = 0x00000400,
};

#if defined(TARGET_PPC64H)
#define NB_MMU_MODES 3
#else
#define NB_MMU_MODES 2
#endif

/*****************************************************************************/
/* The whole PowerPC CPU context */
struct CPUPPCState {
@ -575,6 +581,7 @@ struct CPUPPCState {
jmp_buf jmp_env;
int user_mode_only; /* user mode only simulation */
target_ulong hflags; /* hflags is a MSR & HFLAGS_MASK */
int mmu_idx; /* precomputed MMU index to speed up mem accesses */

/* Power management */
int power_mode;
@ -699,6 +706,18 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, target_ulong val);
#define cpu_signal_handler cpu_ppc_signal_handler
#define cpu_list ppc_cpu_list

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _user
#define MMU_MODE1_SUFFIX _kernel
#if defined(TARGET_PPC64H)
#define MMU_MODE2_SUFFIX _hypv
#endif
#define MMU_USER_IDX 0
static inline int cpu_mmu_index (CPUState *env)
{
return env->mmu_idx;
}

#include "cpu-all.h"

/*****************************************************************************/
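Unlike the other targets, PowerPC does not decode the MSR on every access: cpu_mmu_index() simply returns env->mmu_idx, which is refreshed only when the MSR changes (see the do_compute_hflags() hunk further down). A simplified sketch of the two approaches, with toy types rather than QEMU source:

#include <stdio.h>

/* Toy state; not QEMU source. */
typedef struct {
    int msr_pr;   /* stand-in for the MSR "problem state" (user) bit */
    int mmu_idx;  /* cached index, refreshed on MSR writes           */
} ToyPPCState;

/* Recompute on every access (what a non-caching target does). */
static int toy_mmu_index_recompute(const ToyPPCState *env)
{
    return 1 - env->msr_pr;   /* msr_pr=1 (user) -> 0, supervisor -> 1 */
}

/* Cached variant: memory accesses only pay for a field load. */
static int toy_mmu_index_cached(const ToyPPCState *env)
{
    return env->mmu_idx;
}

/* The cache is kept coherent wherever the mode register is written. */
static void toy_store_msr(ToyPPCState *env, int new_pr)
{
    env->msr_pr = new_pr;
    env->mmu_idx = toy_mmu_index_recompute(env);
}

int main(void)
{
    ToyPPCState env = { 0, 1 };
    toy_store_msr(&env, 1);   /* enter user mode */
    printf("cached idx %d, recomputed idx %d\n",
           toy_mmu_index_cached(&env), toy_mmu_index_recompute(&env));
    return 0;
}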
@ -112,7 +112,7 @@ static always_inline void regs_to_env (void)
}

int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu);
int mmu_idx, int is_softmmu);

static always_inline int cpu_halted (CPUState *env)
{
@ -39,7 +39,7 @@

#if defined(CONFIG_USER_ONLY)
int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
int exception, error_code;

@ -1349,7 +1349,7 @@ target_phys_addr_t cpu_get_phys_page_debug (CPUState *env, target_ulong addr)

/* Perform address translation */
int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
mmu_ctx_t ctx;
int access_type;
@ -1370,7 +1370,7 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
if (ret == 0) {
ret = tlb_set_page(env, address & TARGET_PAGE_MASK,
ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
is_user, is_softmmu);
mmu_idx, is_softmmu);
} else if (ret < 0) {
#if defined (DEBUG_MMU)
if (loglevel != 0)
@ -2083,7 +2083,12 @@ void do_compute_hflags (CPUPPCState *env)
env->hflags |= msr_cm << MSR_CM;
env->hflags |= (uint64_t)msr_sf << MSR_SF;
env->hflags |= (uint64_t)msr_hv << MSR_HV;
/* Precompute MMU index */
if (msr_pr == 0 && msr_hv == 1)
env->mmu_idx = 2;
else
#endif
env->mmu_idx = 1 - msr_pr;
}

/*****************************************************************************/
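With TARGET_PPC64H the precomputed index can take a third value: hypervisor mode (MSR_PR=0, MSR_HV=1) maps to index 2, everything else to 1 - msr_pr as before. The decision reduced to a small sketch, toy code rather than QEMU source:

#include <stdio.h>

/* Mirrors the shape of the precompute added to do_compute_hflags():
 * pr=0,hv=1 -> hypervisor index (2); otherwise 1 - pr. Toy code only. */
static int toy_ppc64h_mmu_index(int msr_pr, int msr_hv)
{
    if (msr_pr == 0 && msr_hv == 1)
        return 2;               /* _hypv mode */
    return 1 - msr_pr;          /* 1 = _kernel, 0 = _user */
}

int main(void)
{
    printf("user:       %d\n", toy_ppc64h_mmu_index(1, 0));  /* 0 */
    printf("supervisor: %d\n", toy_ppc64h_mmu_index(0, 0));  /* 1 */
    printf("hypervisor: %d\n", toy_ppc64h_mmu_index(0, 1));  /* 2 */
    return 0;
}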
@ -2307,7 +2307,7 @@ DO_SPE_OP1(fsctuf);
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
TranslationBlock *tb;
CPUState *saved_env;
@ -2318,7 +2318,7 @@ void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
generated code */
saved_env = env;
env = cpu_single_env;
ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, is_user, 1);
ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
if (unlikely(ret != 0)) {
if (likely(retaddr)) {
/* now we have a real cpu fault */
@ -6693,15 +6693,8 @@ static always_inline int gen_intermediate_code_internal (CPUState *env,
ctx.tb = tb;
ctx.exception = POWERPC_EXCP_NONE;
ctx.spr_cb = env->spr_cb;
#if defined(CONFIG_USER_ONLY)
supervisor = 0;
#else
#if defined(TARGET_PPC64H)
if (msr_pr == 0 && msr_hv == 1)
supervisor = 2;
else
#endif
supervisor = 1 - msr_pr;
supervisor = env->mmu_idx;
#if !defined(CONFIG_USER_ONLY)
ctx.supervisor = supervisor;
#endif
#if defined(TARGET_PPC64)
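On the translation side the same precomputed value now feeds ctx.supervisor, so code generation and the runtime TLB selection read one source of truth for the current privilege level instead of re-deriving it from MSR bits. Schematically, with a toy translation context rather than the real one:

#include <stdio.h>

typedef struct { int mmu_idx; } ToyCPUState;
typedef struct { int supervisor; } ToyDisasContext;

static void toy_start_translation(ToyDisasContext *ctx, const ToyCPUState *env)
{
    ctx->supervisor = env->mmu_idx;   /* formerly recomputed from MSR bits here */
}

int main(void)
{
    ToyCPUState env = { 2 };          /* e.g. the hypervisor index on PPC64H */
    ToyDisasContext ctx;
    toy_start_translation(&ctx, &env);
    printf("translating with supervisor level %d\n", ctx.supervisor);
    return 0;
}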
@ -77,6 +77,8 @@ typedef struct tlb_t {
#define UTLB_SIZE 64
#define ITLB_SIZE 4

#define NB_MMU_MODES 2

typedef struct CPUSH4State {
uint32_t flags; /* general execution flags */
uint32_t gregs[24]; /* general registers */
@ -134,6 +136,15 @@ int cpu_sh4_signal_handler(int host_signum, void *pinfo,
#define cpu_gen_code cpu_sh4_gen_code
#define cpu_signal_handler cpu_sh4_signal_handler

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUState *env)
{
return (env->sr & SR_MD) == 0 ? 1 : 0;
}

#include "cpu-all.h"

/* Memory access type */
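The _kernel/_user suffixes declared here are token-pasting material: the common softmmu headers expand them into one family of mode-specific load/store helpers per NB_MMU_MODES. The idea reduced to a standalone macro demo, with illustrative names that need not match the accessors QEMU actually generates:

#include <stdio.h>

#define GLUE(a, b) a##b

/* Stamp out one toy accessor per mode suffix, tagged with its MMU index. */
#define DEFINE_LD(suffix, idx)                                   \
    static int GLUE(toy_ldl, suffix)(unsigned long addr)         \
    {                                                            \
        printf("ldl%s(0x%lx) -> TLB %d\n", #suffix, addr, idx);  \
        return 0;                                                \
    }

DEFINE_LD(_kernel, 0)   /* MMU_MODE0_SUFFIX */
DEFINE_LD(_user, 1)     /* MMU_MODE1_SUFFIX */

int main(void)
{
    toy_ldl_kernel(0x8c000000);
    toy_ldl_user(0x00400000);
    return 0;
}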
@ -63,7 +63,7 @@ static inline void env_to_regs(void)
}

int cpu_sh4_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
int is_user, int is_softmmu);
int mmu_idx, int is_softmmu);

int find_itlb_entry(CPUState * env, target_ulong address,
int use_asid, int update);
@ -36,7 +36,7 @@ void do_interrupt (CPUState *env)
}

int cpu_sh4_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
env->tea = address;
switch (rw) {
@ -372,15 +372,15 @@ int get_physical_address(CPUState * env, target_ulong * physical,
}

int cpu_sh4_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
target_ulong physical, page_offset, page_size;
int prot, ret, access_type;

/* XXXXX */
#if 0
fprintf(stderr, "%s pc %08x ad %08x rw %d is_user %d smmu %d\n",
__func__, env->pc, address, rw, is_user, is_softmmu);
fprintf(stderr, "%s pc %08x ad %08x rw %d mmu_idx %d smmu %d\n",
__func__, env->pc, address, rw, mmu_idx, is_softmmu);
#endif

access_type = ACCESS_INT;
@ -426,7 +426,7 @@ int cpu_sh4_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
address = (address & TARGET_PAGE_MASK) + page_offset;
physical = (physical & TARGET_PAGE_MASK) + page_offset;

return tlb_set_page(env, address, physical, prot, is_user, is_softmmu);
return tlb_set_page(env, address, physical, prot, mmu_idx, is_softmmu);
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr)
@ -42,7 +42,7 @@ void do_raise_exception(void)
#define SHIFT 3
#include "softmmu_template.h"

void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
TranslationBlock *tb;
CPUState *saved_env;
@ -53,7 +53,7 @@ void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
generated code */
saved_env = env;
env = cpu_single_env;
ret = cpu_sh4_handle_mmu_fault(env, addr, is_write, is_user, 1);
ret = cpu_sh4_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
if (ret) {
if (retaddr) {
/* now we have a real cpu fault */
@ -166,6 +166,8 @@

typedef struct sparc_def_t sparc_def_t;

#define NB_MMU_MODES 2

typedef struct CPUSPARCState {
target_ulong gregs[8]; /* general registers */
target_ulong *regwptr; /* pointer to current register window */
@ -317,6 +319,15 @@ void cpu_check_irqs(CPUSPARCState *env);
#define cpu_signal_handler cpu_sparc_signal_handler
#define cpu_list sparc_cpu_list

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUState *env)
{
return env->psrs == 0 ? 1 : 0;
}

#include "cpu-all.h"

#endif
@ -115,7 +115,7 @@ static inline void regs_to_env(void)
}

int cpu_sparc_handle_mmu_fault(CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu);
int mmu_idx, int is_softmmu);

static inline int cpu_halted(CPUState *env) {
if (!env->halted)
@ -49,7 +49,7 @@ void cpu_unlock(void)
#if defined(CONFIG_USER_ONLY)

int cpu_sparc_handle_mmu_fault(CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu)
int mmu_idx, int is_softmmu)
{
if (rw & 2)
env->exception_index = TT_TFAULT;
@ -100,15 +100,16 @@ static const int perm_table[2][8] = {

int get_physical_address (CPUState *env, target_phys_addr_t *physical, int *prot,
int *access_index, target_ulong address, int rw,
int is_user)
int mmu_idx)
{
int access_perms = 0;
target_phys_addr_t pde_ptr;
uint32_t pde;
target_ulong virt_addr;
int error_code = 0, is_dirty;
int error_code = 0, is_dirty, is_user;
unsigned long page_offset;

is_user = mmu_idx == MMU_USER_IDX;
virt_addr = address & TARGET_PAGE_MASK;

if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
|
||||
|
||||
/* Perform address translation */
|
||||
int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
|
||||
int is_user, int is_softmmu)
|
||||
int mmu_idx, int is_softmmu)
|
||||
{
|
||||
target_phys_addr_t paddr;
|
||||
target_ulong vaddr;
|
||||
int error_code = 0, prot, ret = 0, access_index;
|
||||
|
||||
error_code = get_physical_address(env, &paddr, &prot, &access_index, address, rw, is_user);
|
||||
error_code = get_physical_address(env, &paddr, &prot, &access_index, address, rw, mmu_idx);
|
||||
if (error_code == 0) {
|
||||
vaddr = address & TARGET_PAGE_MASK;
|
||||
paddr &= TARGET_PAGE_MASK;
|
||||
@ -230,7 +231,7 @@ int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
|
||||
printf("Translate at " TARGET_FMT_lx " -> " TARGET_FMT_plx ", vaddr "
|
||||
TARGET_FMT_lx "\n", address, paddr, vaddr);
|
||||
#endif
|
||||
ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
|
||||
ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -246,7 +247,7 @@ int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
|
||||
// switching to normal mode.
|
||||
vaddr = address & TARGET_PAGE_MASK;
|
||||
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
|
||||
ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
|
||||
ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
|
||||
return ret;
|
||||
} else {
|
||||
if (rw & 2)
|
||||
@ -484,8 +485,10 @@ static int get_physical_address_code(CPUState *env, target_phys_addr_t *physical
|
||||
|
||||
int get_physical_address(CPUState *env, target_phys_addr_t *physical, int *prot,
|
||||
int *access_index, target_ulong address, int rw,
|
||||
int is_user)
|
||||
int mmu_idx)
|
||||
{
|
||||
int is_user = mmu_idx == MMU_USER_IDX;
|
||||
|
||||
if (rw == 2)
|
||||
return get_physical_address_code(env, physical, prot, access_index, address, rw, is_user);
|
||||
else
|
||||
@ -494,20 +497,20 @@ int get_physical_address(CPUState *env, target_phys_addr_t *physical, int *prot,
|
||||
|
||||
/* Perform address translation */
|
||||
int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
|
||||
int is_user, int is_softmmu)
|
||||
int mmu_idx, int is_softmmu)
|
||||
{
|
||||
target_ulong virt_addr, vaddr;
|
||||
target_phys_addr_t paddr;
|
||||
int error_code = 0, prot, ret = 0, access_index;
|
||||
|
||||
error_code = get_physical_address(env, &paddr, &prot, &access_index, address, rw, is_user);
|
||||
error_code = get_physical_address(env, &paddr, &prot, &access_index, address, rw, mmu_idx);
|
||||
if (error_code == 0) {
|
||||
virt_addr = address & TARGET_PAGE_MASK;
|
||||
vaddr = virt_addr + ((address & TARGET_PAGE_MASK) & (TARGET_PAGE_SIZE - 1));
|
||||
#ifdef DEBUG_MMU
|
||||
printf("Translate at 0x%" PRIx64 " -> 0x%" PRIx64 ", vaddr 0x%" PRIx64 "\n", address, paddr, vaddr);
|
||||
#endif
|
||||
ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
|
||||
ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
|
||||
return ret;
|
||||
}
|
||||
// XXX
|
||||
|
@ -1522,7 +1522,7 @@ static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
|
||||
NULL, it means that the function was called in C code (i.e. not
|
||||
from generated code or from helper.c) */
|
||||
/* XXX: fix it to restore all registers */
|
||||
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
|
||||
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
|
||||
{
|
||||
TranslationBlock *tb;
|
||||
int ret;
|
||||
@ -1534,7 +1534,7 @@ void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
|
||||
saved_env = env;
|
||||
env = cpu_single_env;
|
||||
|
||||
ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, is_user, 1);
|
||||
ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
|
||||
if (ret) {
|
||||
if (retaddr) {
|
||||
/* now we have a real cpu fault */
|
||||
|
@ -3689,7 +3689,7 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
|
||||
#else
|
||||
extern int get_physical_address (CPUState *env, target_phys_addr_t *physical, int *prot,
|
||||
int *access_index, target_ulong address, int rw,
|
||||
int is_user);
|
||||
int mmu_idx);
|
||||
|
||||
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
|
||||
{
|
||||
|