Merge remote-tracking branch 'remotes/davidhildenbrand/tags/s390x-tcg-2019-10-10' into staging

- MMU DAT translation rewrite and cleanup
- Implement more TCG CPU features related to the MMU (e.g., IEP)
- Add the current instruction length to unwind data and clean up
- Resolve one TODO for the MVCL instruction

# gpg: Signature made Thu 10 Oct 2019 12:25:06 BST
# gpg:                using RSA key 1BD9CAAD735C4C3A460DFCCA4DDE10F700FF835A
# gpg:                issuer "david@redhat.com"
# gpg: Good signature from "David Hildenbrand <david@redhat.com>" [unknown]
# gpg:                 aka "David Hildenbrand <davidhildenbrand@gmail.com>" [full]
# Primary key fingerprint: 1BD9 CAAD 735C 4C3A 460D  FCCA 4DDE 10F7 00FF 835A

* remotes/davidhildenbrand/tags/s390x-tcg-2019-10-10: (31 commits)
  s390x/tcg: MVCL: Exit to main loop if requested
  target/s390x: Remove ILEN_UNWIND
  target/s390x: Remove ilen argument from trigger_pgm_exception
  target/s390x: Remove ilen argument from trigger_access_exception
  target/s390x: Remove ILEN_AUTO
  target/s390x: Rely on unwinding in s390_cpu_virt_mem_rw
  target/s390x: Rely on unwinding in s390_cpu_tlb_fill
  target/s390x: Simplify helper_lra
  target/s390x: Remove fail variable from s390_cpu_tlb_fill
  target/s390x: Return exception from translate_pages
  target/s390x: Return exception from mmu_translate
  target/s390x: Remove exc argument to mmu_translate_asce
  target/s390x: Return exception from mmu_translate_real
  target/s390x: Handle tec in s390_cpu_tlb_fill
  target/s390x: Push trigger_pgm_exception lower in s390_cpu_tlb_fill
  target/s390x: Use tcg_s390_program_interrupt in TCG helpers
  target/s390x: Remove ilen parameter from s390_program_interrupt
  target/s390x: Remove ilen parameter from tcg_s390_program_interrupt
  target/s390x: Add ilen to unwind data
  s390x/cpumodel: Add new TCG features to QEMU cpu model
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2019-10-14 10:42:35 +01:00
commit cdfc44ac3c
21 changed files with 473 additions and 431 deletions
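
The change that dominates the diff below is dropping the instruction-length
(ilen) argument from the program-interrupt helpers; the length is now
recovered from the TCG unwind data instead. A minimal sketch of the pattern
repeated throughout the touched callers:

    /* before: callers passed an explicit instruction length (or ILEN_AUTO) */
    s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);

    /* after: ilen is reconstructed via unwinding from the return address */
    s390_program_interrupt(env, PGM_SPECIFICATION, ra);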

View File

@ -157,7 +157,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
int i;
if (env->psw.mask & PSW_MASK_PSTATE) {
s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
s390_program_interrupt(env, PGM_PRIVILEGED, ra);
return 0;
}
@ -168,7 +168,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
reqh = (ClpReqHdr *)buffer;
req_len = lduw_p(&reqh->len);
if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
}
@ -180,11 +180,11 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
resh = (ClpRspHdr *)(buffer + req_len);
res_len = lduw_p(&resh->len);
if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
}
if ((req_len + res_len) > 8192) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
}
@ -390,12 +390,12 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
uint8_t pcias;
if (env->psw.mask & PSW_MASK_PSTATE) {
s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
s390_program_interrupt(env, PGM_PRIVILEGED, ra);
return 0;
}
if (r2 & 0x1) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return 0;
}
@ -429,25 +429,25 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
switch (pcias) {
case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
if (!len || (len > (8 - (offset & 0x7)))) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
}
result = zpci_read_bar(pbdev, pcias, offset, &data, len);
if (result != MEMTX_OK) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
}
break;
case ZPCI_CONFIG_BAR:
if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
}
data = pci_host_config_read_common(
pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);
if (zpci_endian_swap(&data, len)) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
}
break;
@ -489,12 +489,12 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
uint8_t pcias;
if (env->psw.mask & PSW_MASK_PSTATE) {
s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
s390_program_interrupt(env, PGM_PRIVILEGED, ra);
return 0;
}
if (r2 & 0x1) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return 0;
}
@ -536,13 +536,13 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
* A length of 0 is invalid and length should not cross a double word
*/
if (!len || (len > (8 - (offset & 0x7)))) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
}
result = zpci_write_bar(pbdev, pcias, offset, data, len);
if (result != MEMTX_OK) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
}
break;
@ -550,7 +550,7 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
/* ZPCI uses the pseudo BAR number 15 as configuration space */
/* possible access lengths are 1,2,4 and must not cross a word */
if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
}
/* len = 1,2,4 so we do not need to test */
@ -622,12 +622,12 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
hwaddr start, end;
if (env->psw.mask & PSW_MASK_PSTATE) {
s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
s390_program_interrupt(env, PGM_PRIVILEGED, ra);
return 0;
}
if (r2 & 0x1) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return 0;
}
@ -709,7 +709,7 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
uint8_t buffer[128];
if (env->psw.mask & PSW_MASK_PSTATE) {
s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
s390_program_interrupt(env, PGM_PRIVILEGED, ra);
return 0;
}
@ -772,7 +772,7 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
if (!memory_region_access_valid(mr, offset, len, true,
MEMTXATTRS_UNSPECIFIED)) {
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
}
@ -786,7 +786,7 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
ldq_p(buffer + i * 8),
MO_64, MEMTXATTRS_UNSPECIFIED);
if (result != MEMTX_OK) {
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
}
}
@ -797,7 +797,7 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
return 0;
specification_error:
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return 0;
}
@ -871,14 +871,14 @@ static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib,
pba &= ~0xfff;
pal |= 0xfff;
if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return -EINVAL;
}
/* currently we only support designation type 1 with translation */
if (!(dt == ZPCI_IOTA_RTTO && t)) {
error_report("unsupported ioat dt %d t %d", dt, t);
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return -EINVAL;
}
@ -1003,7 +1003,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
uint64_t cc = ZPCI_PCI_LS_OK;
if (env->psw.mask & PSW_MASK_PSTATE) {
s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
s390_program_interrupt(env, PGM_PRIVILEGED, ra);
return 0;
}
@ -1012,7 +1012,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
fh = env->regs[r1] >> 32;
if (fiba & 0x7) {
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return 0;
}
@ -1040,7 +1040,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
}
if (fib.fmt != 0) {
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return 0;
}
@ -1151,7 +1151,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
break;
}
default:
s390_program_interrupt(&cpu->env, PGM_OPERAND, 6, ra);
s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
cc = ZPCI_PCI_LS_ERR;
}
@ -1171,7 +1171,7 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
uint64_t cc = ZPCI_PCI_LS_OK;
if (env->psw.mask & PSW_MASK_PSTATE) {
s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
s390_program_interrupt(env, PGM_PRIVILEGED, ra);
return 0;
}
@ -1185,7 +1185,7 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
}
if (fiba & 0x7) {
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return 0;
}

View File

@ -650,7 +650,9 @@ DEFINE_CCW_MACHINE(4_2, "4.2", true);
static void ccw_machine_4_1_instance_options(MachineState *machine)
{
static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V4_1 };
ccw_machine_4_2_instance_options(machine);
s390_set_qemu_cpu_model(0x2964, 13, 2, qemu_cpu_feat);
}
static void ccw_machine_4_1_class_options(MachineClass *mc)

View File

@ -72,6 +72,23 @@ void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
/**
* cpu_loop_exit_requested:
* @cpu: The CPU state to be tested
*
* Indicate if somebody asked for a return of the CPU to the main loop
* (e.g., via cpu_exit() or cpu_interrupt()).
*
* This is helpful for architectures that support interruptible
* instructions. After writing back all state to registers/memory, this
* call can be used to check if it makes sense to return to the main loop
* or to continue executing the interruptible instruction.
*/
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
return (int32_t)atomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
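/*
 * Usage sketch (not from the patch; the op_* helpers are hypothetical):
 * an interruptible helper processes work in chunks, writes all guest
 * state back, and only then checks whether it should bail out -- the
 * same pattern the MVCL change in this series adopts:
 *
 *     while (op_has_work_left(env)) {
 *         op_process_one_chunk(env);
 *         // all guest-visible state is back in env/regs at this point
 *         if (unlikely(cpu_loop_exit_requested(env_cpu(env)))) {
 *             cpu_loop_exit_restore(env_cpu(env), ra);  // restart later
 *         }
 *     }
 */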
#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**

View File

@ -21,6 +21,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg_s390x.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
@ -588,8 +589,7 @@ void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
break;
default:
HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1);
s390_program_interrupt(env, PGM_SPECIFICATION, 2, GETPC());
break;
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
}
}
#endif

View File

@ -1,6 +1,10 @@
/*
* S/390 virtual CPU header
*
* For details on the s390x architecture and used definitions (e.g.,
* PSW, PER and DAT (Dynamic Address Translation)), please refer to
* the "z/Architecture Principles of Operations" - a.k.a. PoP.
*
* Copyright (c) 2009 Ulrich Hecht
* Copyright IBM Corp. 2012, 2018
*
@ -30,7 +34,7 @@
/* The z/Architecture has a strong memory model with some store-after-load re-ordering */
#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
#define TARGET_INSN_START_EXTRA_WORDS 1
#define TARGET_INSN_START_EXTRA_WORDS 2
#define MMU_MODE0_SUFFIX _primary
#define MMU_MODE1_SUFFIX _secondary
@ -311,6 +315,7 @@ extern const VMStateDescription vmstate_s390_cpu;
#define CR0_EDAT 0x0000000000800000ULL
#define CR0_AFP 0x0000000000040000ULL
#define CR0_VECTOR 0x0000000000020000ULL
#define CR0_IEP 0x0000000000100000ULL
#define CR0_EMERGENCY_SIGNAL_SC 0x0000000000004000ULL
#define CR0_EXTERNAL_CALL_SC 0x0000000000002000ULL
#define CR0_CKC_SC 0x0000000000000800ULL
@ -558,26 +563,60 @@ QEMU_BUILD_BUG_ON(sizeof(SysIB) != 4096);
#define ASCE_TYPE_SEGMENT 0x00 /* segment table type */
#define ASCE_TABLE_LENGTH 0x03 /* region table length */
#define REGION_ENTRY_ORIGIN (~0xfffULL) /* region/segment table origin */
#define REGION_ENTRY_RO 0x200 /* region/segment protection bit */
#define REGION_ENTRY_TF 0xc0 /* region/segment table offset */
#define REGION_ENTRY_INV 0x20 /* invalid region table entry */
#define REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
#define REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
#define REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
#define REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
#define REGION_ENTRY_LENGTH 0x03 /* region third length */
#define REGION_ENTRY_ORIGIN 0xfffffffffffff000ULL
#define REGION_ENTRY_P 0x0000000000000200ULL
#define REGION_ENTRY_TF 0x00000000000000c0ULL
#define REGION_ENTRY_I 0x0000000000000020ULL
#define REGION_ENTRY_TT 0x000000000000000cULL
#define REGION_ENTRY_TL 0x0000000000000003ULL
#define SEGMENT_ENTRY_ORIGIN (~0x7ffULL) /* segment table origin */
#define SEGMENT_ENTRY_FC 0x400 /* format control */
#define SEGMENT_ENTRY_RO 0x200 /* page protection bit */
#define SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
#define REGION_ENTRY_TT_REGION1 0x000000000000000cULL
#define REGION_ENTRY_TT_REGION2 0x0000000000000008ULL
#define REGION_ENTRY_TT_REGION3 0x0000000000000004ULL
#define VADDR_PX 0xff000 /* page index bits */
#define REGION3_ENTRY_RFAA 0xffffffff80000000ULL
#define REGION3_ENTRY_AV 0x0000000000010000ULL
#define REGION3_ENTRY_ACC 0x000000000000f000ULL
#define REGION3_ENTRY_F 0x0000000000000800ULL
#define REGION3_ENTRY_FC 0x0000000000000400ULL
#define REGION3_ENTRY_IEP 0x0000000000000100ULL
#define REGION3_ENTRY_CR 0x0000000000000010ULL
#define PAGE_RO 0x200 /* HW read-only bit */
#define PAGE_INVALID 0x400 /* HW invalid bit */
#define PAGE_RES0 0x800 /* bit must be zero */
#define SEGMENT_ENTRY_ORIGIN 0xfffffffffffff800ULL
#define SEGMENT_ENTRY_SFAA 0xfffffffffff00000ULL
#define SEGMENT_ENTRY_AV 0x0000000000010000ULL
#define SEGMENT_ENTRY_ACC 0x000000000000f000ULL
#define SEGMENT_ENTRY_F 0x0000000000000800ULL
#define SEGMENT_ENTRY_FC 0x0000000000000400ULL
#define SEGMENT_ENTRY_P 0x0000000000000200ULL
#define SEGMENT_ENTRY_IEP 0x0000000000000100ULL
#define SEGMENT_ENTRY_I 0x0000000000000020ULL
#define SEGMENT_ENTRY_CS 0x0000000000000010ULL
#define SEGMENT_ENTRY_TT 0x000000000000000cULL
#define SEGMENT_ENTRY_TT_SEGMENT 0x0000000000000000ULL
#define PAGE_ENTRY_0 0x0000000000000800ULL
#define PAGE_ENTRY_I 0x0000000000000400ULL
#define PAGE_ENTRY_P 0x0000000000000200ULL
#define PAGE_ENTRY_IEP 0x0000000000000100ULL
#define VADDR_REGION1_TX_MASK 0xffe0000000000000ULL
#define VADDR_REGION2_TX_MASK 0x001ffc0000000000ULL
#define VADDR_REGION3_TX_MASK 0x000003ff80000000ULL
#define VADDR_SEGMENT_TX_MASK 0x000000007ff00000ULL
#define VADDR_PAGE_TX_MASK 0x00000000000ff000ULL
#define VADDR_REGION1_TX(vaddr) (((vaddr) & VADDR_REGION1_TX_MASK) >> 53)
#define VADDR_REGION2_TX(vaddr) (((vaddr) & VADDR_REGION2_TX_MASK) >> 42)
#define VADDR_REGION3_TX(vaddr) (((vaddr) & VADDR_REGION3_TX_MASK) >> 31)
#define VADDR_SEGMENT_TX(vaddr) (((vaddr) & VADDR_SEGMENT_TX_MASK) >> 20)
#define VADDR_PAGE_TX(vaddr) (((vaddr) & VADDR_PAGE_TX_MASK) >> 12)
#define VADDR_REGION1_TL(vaddr) (((vaddr) & 0xc000000000000000ULL) >> 62)
#define VADDR_REGION2_TL(vaddr) (((vaddr) & 0x0018000000000000ULL) >> 51)
#define VADDR_REGION3_TL(vaddr) (((vaddr) & 0x0000030000000000ULL) >> 40)
#define VADDR_SEGMENT_TL(vaddr) (((vaddr) & 0x0000000060000000ULL) >> 29)
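/*
 * For reference (derived from the masks above, not added by the patch):
 * a 64-bit virtual address is sliced into the DAT lookup fields as
 *
 *   63        53 52        42 41        31 30        20 19      12 11      0
 *  | region1 TX | region2 TX | region3 TX | segment TX | page TX  | byte off|
 *    (11 bits)    (11 bits)    (11 bits)    (11 bits)    (8 bits)  (12 bits)
 *
 * and each VADDR_*_TL() macro extracts the top two bits of the
 * corresponding index for the table-length check.
 */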
#define SK_C (0x1 << 1)
#define SK_R (0x1 << 2)
@ -765,11 +804,8 @@ int cpu_s390x_signal_handler(int host_signum, void *pinfo, void *puc);
void s390_crw_mchk(void);
void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
uint32_t io_int_parm, uint32_t io_int_word);
/* automatically detect the instruction length */
#define ILEN_AUTO 0xff
#define RA_IGNORED 0
void s390_program_interrupt(CPUS390XState *env, uint32_t code, int ilen,
uintptr_t ra);
void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra);
/* service interrupts are floating therefore we must not pass an cpustate */
void s390_sclp_extint(uint32_t parm);

View File

@ -13,6 +13,7 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "internal.h"
#include "tcg_s390x.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
@ -34,16 +35,14 @@ uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3,
case S390_FEAT_TYPE_PCKMO:
case S390_FEAT_TYPE_PCC:
if (mod) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return 0;
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
break;
}
s390_get_feat_block(type, subfunc);
if (!test_be_bit(fc, subfunc)) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return 0;
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
switch (fc) {

View File

@ -61,12 +61,12 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra)
IplParameterBlock *iplb;
if (env->psw.mask & PSW_MASK_PSTATE) {
s390_program_interrupt(env, PGM_PRIVILEGED, ILEN_AUTO, ra);
s390_program_interrupt(env, PGM_PRIVILEGED, ra);
return;
}
if ((subcode & ~0x0ffffULL) || (subcode > 6)) {
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return;
}
@ -82,13 +82,13 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra)
break;
case 5:
if ((r1 & 1) || (addr & 0x0fffULL)) {
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return;
}
if (!address_space_access_valid(&address_space_memory, addr,
sizeof(IplParameterBlock), false,
MEMTXATTRS_UNSPECIFIED)) {
s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra);
s390_program_interrupt(env, PGM_ADDRESSING, ra);
return;
}
iplb = g_new0(IplParameterBlock, 1);
@ -112,13 +112,13 @@ out:
return;
case 6:
if ((r1 & 1) || (addr & 0x0fffULL)) {
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return;
}
if (!address_space_access_valid(&address_space_memory, addr,
sizeof(IplParameterBlock), true,
MEMTXATTRS_UNSPECIFIED)) {
s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra);
s390_program_interrupt(env, PGM_ADDRESSING, ra);
return;
}
iplb = s390_ipl_get_iplb();
@ -130,7 +130,7 @@ out:
}
return;
default:
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
break;
}
}

View File

@ -34,15 +34,15 @@
#include "hw/boards.h"
#endif
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code,
int ilen, uintptr_t ra)
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
uint32_t code, uintptr_t ra)
{
CPUState *cs = env_cpu(env);
cpu_restore_state(cs, ra, true);
qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
env->psw.addr);
trigger_pgm_exception(env, code, ilen);
trigger_pgm_exception(env, code);
cpu_loop_exit(cs);
}
@ -60,7 +60,7 @@ void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
if (env->cregs[0] & CR0_AFP) {
env->fpc = deposit32(env->fpc, 8, 8, dxc);
}
tcg_s390_program_interrupt(env, PGM_DATA, ILEN_AUTO, ra);
tcg_s390_program_interrupt(env, PGM_DATA, ra);
}
void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
@ -75,7 +75,7 @@ void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
/* Always store the VXC into the FPC, without AFP it is undefined */
env->fpc = deposit32(env->fpc, 8, 8, vxc);
tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ILEN_AUTO, ra);
tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}
void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
@ -96,7 +96,7 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
{
S390CPU *cpu = S390_CPU(cs);
trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
trigger_pgm_exception(&cpu->env, PGM_ADDRESSING);
/* On real machines this value is dropped into LowMem. Since this
is userland, simply put this someplace that cpu_loop can find it. */
cpu->env.__excp_addr = address;
@ -126,8 +126,8 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
target_ulong vaddr, raddr;
uint64_t asc;
int prot, fail;
uint64_t asc, tec;
int prot, excp;
qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
__func__, address, access_type, mmu_idx);
@ -140,30 +140,30 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (!(env->psw.mask & PSW_MASK_64)) {
vaddr &= 0x7fffffff;
}
fail = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, true);
excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
} else if (mmu_idx == MMU_REAL_IDX) {
/* 31-Bit mode */
if (!(env->psw.mask & PSW_MASK_64)) {
vaddr &= 0x7fffffff;
}
fail = mmu_translate_real(env, vaddr, access_type, &raddr, &prot);
excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
} else {
g_assert_not_reached();
}
/* check out of RAM access */
if (!fail &&
if (!excp &&
!address_space_access_valid(&address_space_memory, raddr,
TARGET_PAGE_SIZE, access_type,
MEMTXATTRS_UNSPECIFIED)) {
qemu_log_mask(CPU_LOG_MMU,
"%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
__func__, (uint64_t)raddr, (uint64_t)ram_size);
trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
fail = 1;
excp = PGM_ADDRESSING;
tec = 0; /* unused */
}
if (!fail) {
if (!excp) {
qemu_log_mask(CPU_LOG_MMU,
"%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
__func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
@ -175,23 +175,20 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
return false;
}
cpu_restore_state(cs, retaddr, true);
/*
* The ILC value for code accesses is undefined. The important
* thing here is to *not* leave env->int_pgm_ilen set to ILEN_AUTO,
* which would cause do_program_interrupt to attempt to read from
* env->psw.addr again. C.f. the condition in trigger_page_fault,
* but is not universally applied.
*
* ??? If we remove ILEN_AUTO, by moving the computation of ILEN
* into cpu_restore_state, then we may remove this entirely.
*/
if (access_type == MMU_INST_FETCH) {
env->int_pgm_ilen = 2;
if (excp != PGM_ADDRESSING) {
stq_phys(env_cpu(env)->as,
env->psa + offsetof(LowCore, trans_exc_code), tec);
}
cpu_loop_exit(cs);
/*
* For data accesses, ILEN will be filled in from the unwind info,
* within cpu_loop_exit_restore. For code accesses, retaddr == 0,
* and so unwinding will not occur. However, ILEN is also undefined
* for that case -- we choose to set ILEN = 2.
*/
env->int_pgm_ilen = 2;
trigger_pgm_exception(env, excp);
cpu_loop_exit_restore(cs, retaddr);
}
static void do_program_interrupt(CPUS390XState *env)
@ -200,9 +197,6 @@ static void do_program_interrupt(CPUS390XState *env)
LowCore *lowcore;
int ilen = env->int_pgm_ilen;
if (ilen == ILEN_AUTO) {
ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
}
assert(ilen == 2 || ilen == 4 || ilen == 6);
switch (env->int_pgm_code) {
@ -614,7 +608,7 @@ void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}
#endif /* CONFIG_USER_ONLY */

View File

@ -825,7 +825,7 @@ void HELPER(sfpc)(CPUS390XState *env, uint64_t fpc)
{
if (fpc_to_rnd[fpc & 0x7] == -1 || fpc & 0x03030088u ||
(!s390_has_feat(S390_FEAT_FLOATING_POINT_EXT) && fpc & 0x4)) {
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, GETPC());
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
}
/* Install everything in the main FPC. */
@ -843,7 +843,7 @@ void HELPER(sfas)(CPUS390XState *env, uint64_t fpc)
if (fpc_to_rnd[fpc & 0x7] == -1 || fpc & 0x03030088u ||
(!s390_has_feat(S390_FEAT_FLOATING_POINT_EXT) && fpc & 0x4)) {
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, GETPC());
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
}
/*
@ -880,7 +880,7 @@ void HELPER(sfas)(CPUS390XState *env, uint64_t fpc)
void HELPER(srnm)(CPUS390XState *env, uint64_t rnd)
{
if (rnd > 0x7 || fpc_to_rnd[rnd & 0x7] == -1) {
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, GETPC());
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
}
env->fpc = deposit32(env->fpc, 0, 3, rnd);

View File

@ -698,15 +698,23 @@ static uint16_t qemu_V4_0[] = {
S390_FEAT_ZPCI,
};
static uint16_t qemu_LATEST[] = {
static uint16_t qemu_V4_1[] = {
S390_FEAT_STFLE_53,
S390_FEAT_VECTOR,
};
static uint16_t qemu_LATEST[] = {
S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION,
S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2,
S390_FEAT_ESOP,
};
/* add all new definitions before this point */
static uint16_t qemu_MAX[] = {
/* generates a dependency warning, leave it out for now */
S390_FEAT_MSA_EXT_5,
/* features introduced after the z13 */
S390_FEAT_INSTRUCTION_EXEC_PROT,
};
/****** END FEATURE DEFS ******/
@ -824,6 +832,7 @@ static FeatGroupDefSpec QemuFeatDef[] = {
QEMU_FEAT_INITIALIZER(V2_11),
QEMU_FEAT_INITIALIZER(V3_1),
QEMU_FEAT_INITIALIZER(V4_0),
QEMU_FEAT_INITIALIZER(V4_1),
QEMU_FEAT_INITIALIZER(LATEST),
QEMU_FEAT_INITIALIZER(MAX),
};

View File

@ -52,6 +52,7 @@ hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
target_ulong raddr;
int prot;
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
uint64_t tec;
/* 31-Bit mode */
if (!(env->psw.mask & PSW_MASK_64)) {
@ -63,7 +64,11 @@ hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
asc = PSW_ASC_PRIMARY;
}
if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
/*
* We want to read code even if IEP is active. Use MMU_DATA_LOAD instead
* of MMU_INST_FETCH.
*/
if (mmu_translate(env, vaddr, MMU_DATA_LOAD, asc, &raddr, &prot, &tec)) {
return -1;
}
return raddr;

View File

@ -21,6 +21,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg_s390x.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
@ -39,7 +40,7 @@ int64_t HELPER(divs32)(CPUS390XState *env, int64_t a, int64_t b64)
int64_t q;
if (b == 0) {
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
}
ret = q = a / b;
@ -47,7 +48,7 @@ int64_t HELPER(divs32)(CPUS390XState *env, int64_t a, int64_t b64)
/* Catch non-representable quotient. */
if (ret != q) {
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
}
return ret;
@ -60,7 +61,7 @@ uint64_t HELPER(divu32)(CPUS390XState *env, uint64_t a, uint64_t b64)
uint64_t q;
if (b == 0) {
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
}
ret = q = a / b;
@ -68,7 +69,7 @@ uint64_t HELPER(divu32)(CPUS390XState *env, uint64_t a, uint64_t b64)
/* Catch non-representable quotient. */
if (ret != q) {
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
}
return ret;
@ -79,7 +80,7 @@ int64_t HELPER(divs64)(CPUS390XState *env, int64_t a, int64_t b)
{
/* Catch divide by zero, and non-representable quotient (MIN / -1). */
if (b == 0 || (b == -1 && a == (1ll << 63))) {
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
}
env->retxl = a % b;
return a / b;
@ -92,7 +93,7 @@ uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al,
uint64_t ret;
/* Signal divide by zero. */
if (b == 0) {
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
}
if (ah == 0) {
/* 64 -> 64/64 case */
@ -106,7 +107,7 @@ uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al,
env->retxl = a % b;
ret = q;
if (ret != q) {
s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
}
#else
/* 32-bit hosts would need special wrapper functionality - just abort if

View File

@ -317,7 +317,7 @@ void cpu_unmap_lowcore(LowCore *lowcore);
/* interrupt.c */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen);
void trigger_pgm_exception(CPUS390XState *env, uint32_t code);
void cpu_inject_clock_comparator(S390CPU *cpu);
void cpu_inject_cpu_timer(S390CPU *cpu);
void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr);
@ -360,9 +360,9 @@ void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
/* mmu_helper.c */
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
target_ulong *raddr, int *flags, bool exc);
target_ulong *raddr, int *flags, uint64_t *tec);
int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
target_ulong *addr, int *flags);
target_ulong *addr, int *flags, uint64_t *tec);
/* misc_helper.c */

View File

@ -22,22 +22,21 @@
#endif
/* Ensure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
void trigger_pgm_exception(CPUS390XState *env, uint32_t code)
{
CPUState *cs = env_cpu(env);
cs->exception_index = EXCP_PGM;
env->int_pgm_code = code;
env->int_pgm_ilen = ilen;
/* env->int_pgm_ilen is already set, or will be set during unwinding */
}
void s390_program_interrupt(CPUS390XState *env, uint32_t code, int ilen,
uintptr_t ra)
void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra)
{
if (kvm_enabled()) {
kvm_s390_program_interrupt(env_archcpu(env), code);
} else if (tcg_enabled()) {
tcg_s390_program_interrupt(env, code, ilen, ra);
tcg_s390_program_interrupt(env, code, ra);
} else {
g_assert_not_reached();
}

View File

@ -44,7 +44,7 @@ void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
SubchDev *sch;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
return;
}
trace_ioinst_sch_id("xsch", cssid, ssid, schid);
@ -62,7 +62,7 @@ void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
SubchDev *sch;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
return;
}
trace_ioinst_sch_id("csch", cssid, ssid, schid);
@ -80,7 +80,7 @@ void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
SubchDev *sch;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
return;
}
trace_ioinst_sch_id("hsch", cssid, ssid, schid);
@ -116,7 +116,7 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return;
}
if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) {
@ -125,7 +125,7 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
}
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
!ioinst_schib_valid(&schib)) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return;
}
trace_ioinst_sch_id("msch", cssid, ssid, schid);
@ -173,7 +173,7 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return;
}
if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
@ -183,7 +183,7 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
copy_orb_from_guest(&orb, &orig_orb);
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
!ioinst_orb_valid(&orb)) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return;
}
trace_ioinst_sch_id("ssch", cssid, ssid, schid);
@ -205,7 +205,7 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return;
}
@ -236,7 +236,7 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return;
}
@ -247,7 +247,7 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
* access execption if it is not) first.
*/
if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
} else {
s390_cpu_virt_mem_handle_exc(cpu, ra);
}
@ -299,13 +299,13 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
uint8_t ar;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return -EIO;
}
trace_ioinst_sch_id("tsch", cssid, ssid, schid);
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return -EIO;
}
@ -613,7 +613,7 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
addr = env->regs[reg];
/* Page boundary? */
if (addr & 0xfff) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return;
}
/*
@ -629,7 +629,7 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
len = be16_to_cpu(req->len);
/* Length field valid? */
if ((len < 16) || (len > 4088) || (len & 7)) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return;
}
memset((char *)req + len, 0, TARGET_PAGE_SIZE - len);
@ -678,7 +678,7 @@ void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
trace_ioinst("schm");
if (SCHM_REG1_RES(reg1)) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return;
}
@ -687,7 +687,7 @@ void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
dct = SCHM_REG1_DCT(reg1);
if (update && (reg2 & 0x000000000000001f)) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return;
}
@ -700,7 +700,7 @@ void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
SubchDev *sch;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
return;
}
trace_ioinst_sch_id("rsch", cssid, ssid, schid);
@ -724,7 +724,7 @@ void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
CPUS390XState *env = &cpu->env;
if (RCHP_REG1_RES(reg1)) {
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return;
}
@ -747,7 +747,7 @@ void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
break;
default:
/* Invalid channel subsystem. */
s390_program_interrupt(env, PGM_OPERAND, 4, ra);
s390_program_interrupt(env, PGM_OPERAND, ra);
return;
}
setcc(cpu, cc);
@ -758,6 +758,6 @@ void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
/* We do not provide address limit checking, so let's suppress it. */
if (SAL_REG1_INVALID(reg1) || reg1 & 0x000000000000ffff) {
s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
}
}

View File

@ -21,6 +21,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg_s390x.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
@ -71,7 +72,7 @@ static inline void check_alignment(CPUS390XState *env, uint64_t v,
int wordsize, uintptr_t ra)
{
if (v % wordsize) {
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
}
@ -730,7 +731,7 @@ void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
/* Bits 32-55 must contain all 0. */
if (env->regs[0] & 0xffffff00u) {
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
str = get_address(env, r2);
@ -767,7 +768,7 @@ void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2)
/* Bits 32-47 of R0 must be zero. */
if (env->regs[0] & 0xffff0000u) {
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
str = get_address(env, r2);
@ -846,7 +847,7 @@ uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
S390Access srca, desta;
if ((f && s) || extract64(r0, 12, 4)) {
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, GETPC());
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
}
r1 = wrap_address(env, r1 & TARGET_PAGE_MASK);
@ -879,7 +880,7 @@ uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
int i;
if (env->regs[0] & 0xffffff00ull) {
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
/*
@ -911,8 +912,7 @@ void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
int i;
if (a2 & 0x3) {
/* we either came here by lam or lamy, which have different lengths */
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
for (i = r1;; i = (i + 1) % 16) {
@ -932,7 +932,7 @@ void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
int i;
if (a2 & 0x3) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
for (i = r1;; i = (i + 1) % 16) {
@ -1015,6 +1015,7 @@ uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
uint64_t src = get_address(env, r2);
uint8_t pad = env->regs[r2 + 1] >> 24;
CPUState *cs = env_cpu(env);
S390Access srca, desta;
uint32_t cc, cur_len;
@ -1065,7 +1066,15 @@ uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
set_address_zero(env, r1, dest);
/* TODO: Deliver interrupts. */
/*
* MVCL is interruptible. Return to the main loop if requested after
* writing back all state to registers. If no interrupt will get
* injected, we'll end up back in this handler and continue processing
* the remaining parts.
*/
if (destlen && unlikely(cpu_loop_exit_requested(cs))) {
cpu_loop_exit_restore(cs, ra);
}
}
return cc;
}
@ -1888,8 +1897,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
return cc;
spec_exception:
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
g_assert_not_reached();
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
@ -1912,7 +1920,7 @@ void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
uint32_t i;
if (src & 0x7) {
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
for (i = r1;; i = (i + 1) % 16) {
@ -1945,7 +1953,7 @@ void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
uint32_t i;
if (src & 0x3) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
for (i = r1;; i = (i + 1) % 16) {
@ -1976,7 +1984,7 @@ void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
uint32_t i;
if (dest & 0x7) {
s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
for (i = r1;; i = (i + 1) % 16) {
@ -1996,7 +2004,7 @@ void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
uint32_t i;
if (dest & 0x3) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
for (i = r1;; i = (i + 1) % 16) {
@ -2168,7 +2176,7 @@ uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
psw_as == AS_HOME || psw_as == AS_ACCREG) {
s390_program_interrupt(env, PGM_SPECIAL_OP, ILEN_AUTO, ra);
s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
}
l = wrap_length32(env, l);
@ -2199,7 +2207,7 @@ uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
psw_as == AS_HOME || psw_as == AS_ACCREG) {
s390_program_interrupt(env, PGM_SPECIAL_OP, ILEN_AUTO, ra);
s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
}
l = wrap_length32(env, l);
@ -2226,7 +2234,7 @@ void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
uint16_t entries, i, index = 0;
if (r2 & 0xff000) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
if (!(r2 & 0x800)) {
@ -2252,9 +2260,9 @@ void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
/* addresses are not wrapped in 24/31bit mode but table index is */
raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
entry = cpu_ldq_real_ra(env, raddr, ra);
if (!(entry & REGION_ENTRY_INV)) {
if (!(entry & REGION_ENTRY_I)) {
/* we are allowed to not store if already invalid */
entry |= REGION_ENTRY_INV;
entry |= REGION_ENTRY_I;
cpu_stq_real_ra(env, raddr, entry, ra);
}
}
@ -2279,17 +2287,17 @@ void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
/* Compute the page table entry address */
pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
pte_addr += (vaddr & VADDR_PX) >> 9;
pte_addr += VADDR_PAGE_TX(vaddr) * 8;
/* Mark the page table entry as invalid */
pte = cpu_ldq_real_ra(env, pte_addr, ra);
pte |= PAGE_INVALID;
pte |= PAGE_ENTRY_I;
cpu_stq_real_ra(env, pte_addr, pte, ra);
/* XXX we exploit the fact that Linux passes the exact virtual
address here - it's not obliged to! */
if (m4 & 1) {
if (vaddr & ~VADDR_PX) {
if (vaddr & ~VADDR_PAGE_TX_MASK) {
tlb_flush_page(cs, page);
/* XXX 31-bit hack */
tlb_flush_page(cs, page ^ 0x80000000);
@ -2298,7 +2306,7 @@ void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
tlb_flush(cs);
}
} else {
if (vaddr & ~VADDR_PX) {
if (vaddr & ~VADDR_PAGE_TX_MASK) {
tlb_flush_page_all_cpus_synced(cs, page);
/* XXX 31-bit hack */
tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
@ -2362,27 +2370,23 @@ void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
CPUState *cs = env_cpu(env);
uint32_t cc = 0;
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
uint64_t ret;
int old_exc, flags;
uint64_t ret, tec;
int flags, exc, cc;
/* XXX incomplete - has more corner cases */
if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
s390_program_interrupt(env, PGM_SPECIAL_OP, 2, GETPC());
tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, GETPC());
}
old_exc = cs->exception_index;
if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
exc = mmu_translate(env, addr, 0, asc, &ret, &flags, &tec);
if (exc) {
cc = 3;
}
if (cs->exception_index == EXCP_PGM) {
ret = env->int_pgm_code | 0x80000000;
ret = exc | 0x80000000;
} else {
cc = 0;
ret |= addr & ~TARGET_PAGE_MASK;
}
cs->exception_index = old_exc;
env->cc_op = cc;
return ret;
@ -2539,7 +2543,7 @@ uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
__func__, dest, src, len);
if (!(env->psw.mask & PSW_MASK_DAT)) {
s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
}
/* OAC (operand access control) for the first operand -> dest */
@ -2570,14 +2574,14 @@ uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
}
if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
}
if (!(env->cregs[0] & CR0_SECONDARY) &&
(dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
}
if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
tcg_s390_program_interrupt(env, PGM_PRIVILEGED, ra);
}
len = wrap_length32(env, len);
@ -2591,7 +2595,7 @@ uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
(env->psw.mask & PSW_MASK_PSTATE)) {
qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
__func__);
s390_program_interrupt(env, PGM_ADDRESSING, 6, ra);
tcg_s390_program_interrupt(env, PGM_ADDRESSING, ra);
}
/* FIXME: Access using correct keys and AR-mode */

View File

@ -106,7 +106,7 @@ uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
int r = sclp_service_call(env, r1, r2);
qemu_mutex_unlock_iothread();
if (r < 0) {
s390_program_interrupt(env, -r, 4, GETPC());
tcg_s390_program_interrupt(env, -r, GETPC());
}
return r;
}
@ -143,7 +143,7 @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
}
if (r) {
s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, GETPC());
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
}
}
@ -222,7 +222,7 @@ void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
uint32_t val = r0;
if (val & 0xffff0000) {
s390_program_interrupt(env, PGM_SPECIFICATION, 2, GETPC());
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
}
env->todpr = val;
}
@ -266,7 +266,7 @@ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
}
if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) {
@ -276,7 +276,7 @@ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
}
if (a0 & ~TARGET_PAGE_MASK) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
/* count the cpus and split them into configured and reserved ones */
@ -509,7 +509,7 @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
LowCore *lowcore;
if (addr & 0x3) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
qemu_mutex_lock_iothread();
@ -573,17 +573,8 @@ void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
#ifndef CONFIG_USER_ONLY
void HELPER(per_check_exception)(CPUS390XState *env)
{
uint32_t ilen;
if (env->per_perc_atmid) {
/*
* FIXME: ILEN_AUTO is most probably the right thing to use. ilen
* always has to match the instruction referenced in the PSW. E.g.
* if a PER interrupt is triggered via EXECUTE, we have to use ilen
* of EXECUTE, while per_address contains the target of EXECUTE.
*/
ilen = get_ilen(cpu_ldub_code(env, env->per_address));
s390_program_interrupt(env, PGM_PER, ilen, GETPC());
tcg_s390_program_interrupt(env, PGM_PER, GETPC());
}
}
@ -673,7 +664,7 @@ uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
int i;
if (addr & 0x7) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
prepare_stfl();
@ -746,7 +737,7 @@ void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
qemu_mutex_unlock_iothread();
/* css_do_sic() may actually return a PGM_xxx value to inject */
if (r) {
s390_program_interrupt(env, -r, 4, GETPC());
tcg_s390_program_interrupt(env, -r, GETPC());
}
}

View File

@ -28,37 +28,12 @@
#include "hw/hw.h"
#include "hw/s390x/storage-keys.h"
/* #define DEBUG_S390 */
/* #define DEBUG_S390_PTE */
/* #define DEBUG_S390_STDOUT */
#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, fmt, ## __VA_ARGS__); \
if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
#ifdef DEBUG_S390_PTE
#define PTE_DPRINTF DPRINTF
#else
#define PTE_DPRINTF(fmt, ...) \
do { } while (0)
#endif
/* Fetch/store bits in the translation exception code: */
#define FS_READ 0x800
#define FS_WRITE 0x400
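/*
 * Roughly, as visible in the fault triggers removed below, a translation-
 * exception code (tec) is assembled from the failing address, one of the
 * fetch/store bits above and the address-space-control bits, e.g.:
 *
 *     tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | asc >> 46;
 */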
static void trigger_access_exception(CPUS390XState *env, uint32_t type,
uint32_t ilen, uint64_t tec)
uint64_t tec)
{
S390CPU *cpu = env_archcpu(env);
@ -69,48 +44,10 @@ static void trigger_access_exception(CPUS390XState *env, uint32_t type,
if (type != PGM_ADDRESSING) {
stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec);
}
trigger_pgm_exception(env, type, ilen);
trigger_pgm_exception(env, type);
}
}
static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, int rw, bool exc)
{
uint64_t tec;
tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | 4 | asc >> 46;
DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);
if (!exc) {
return;
}
trigger_access_exception(env, PGM_PROTECTION, ILEN_AUTO, tec);
}
static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
uint32_t type, uint64_t asc, int rw, bool exc)
{
int ilen = ILEN_AUTO;
uint64_t tec;
tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | asc >> 46;
DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);
if (!exc) {
return;
}
/* Code accesses have an undefined ilc. */
if (rw == MMU_INST_FETCH) {
ilen = 2;
}
trigger_access_exception(env, type, ilen, tec);
}
/* check whether the address would be proteted by Low-Address Protection */
static bool is_low_address(uint64_t addr)
{
@ -156,122 +93,40 @@ target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
return raddr;
}
/* Decode page table entry (normal 4KB page) */
static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t pt_entry,
target_ulong *raddr, int *flags, int rw, bool exc)
{
if (pt_entry & PAGE_INVALID) {
DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, pt_entry);
trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw, exc);
return -1;
}
if (pt_entry & PAGE_RES0) {
trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
return -1;
}
if (pt_entry & PAGE_RO) {
*flags &= ~PAGE_WRITE;
}
*raddr = pt_entry & ASCE_ORIGIN;
PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, pt_entry);
return 0;
}
/* Decode segment table entry */
static int mmu_translate_segment(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t st_entry,
target_ulong *raddr, int *flags, int rw,
bool exc)
static inline bool read_table_entry(CPUS390XState *env, hwaddr gaddr,
uint64_t *entry)
{
CPUState *cs = env_cpu(env);
uint64_t origin, offs, pt_entry;
if (st_entry & SEGMENT_ENTRY_RO) {
*flags &= ~PAGE_WRITE;
/*
* According to the PoP, these table addresses are "unpredictably real
* or absolute". Also, "it is unpredictable whether the address wraps
* or an addressing exception is recognized".
*
* We treat them as absolute addresses and don't wrap them.
*/
if (unlikely(address_space_read(cs->as, gaddr, MEMTXATTRS_UNSPECIFIED,
(uint8_t *)entry, sizeof(*entry)) !=
MEMTX_OK)) {
return false;
}
if ((st_entry & SEGMENT_ENTRY_FC) && (env->cregs[0] & CR0_EDAT)) {
/* Decode EDAT1 segment frame absolute address (1MB page) */
*raddr = (st_entry & 0xfffffffffff00000ULL) | (vaddr & 0xfffff);
PTE_DPRINTF("%s: SEG=0x%" PRIx64 "\n", __func__, st_entry);
return 0;
}
/* Look up 4KB page entry */
origin = st_entry & SEGMENT_ENTRY_ORIGIN;
offs = (vaddr & VADDR_PX) >> 9;
pt_entry = ldq_phys(cs->as, origin + offs);
PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
__func__, origin, offs, pt_entry);
return mmu_translate_pte(env, vaddr, asc, pt_entry, raddr, flags, rw, exc);
}
/* Decode region table entries */
static int mmu_translate_region(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t entry, int level,
target_ulong *raddr, int *flags, int rw,
bool exc)
{
CPUState *cs = env_cpu(env);
uint64_t origin, offs, new_entry;
const int pchks[4] = {
PGM_SEGMENT_TRANS, PGM_REG_THIRD_TRANS,
PGM_REG_SEC_TRANS, PGM_REG_FIRST_TRANS
};
PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __func__, entry);
origin = entry & REGION_ENTRY_ORIGIN;
offs = (vaddr >> (17 + 11 * level / 4)) & 0x3ff8;
new_entry = ldq_phys(cs->as, origin + offs);
PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
__func__, origin, offs, new_entry);
if ((new_entry & REGION_ENTRY_INV) != 0) {
DPRINTF("%s: invalid region\n", __func__);
trigger_page_fault(env, vaddr, pchks[level / 4], asc, rw, exc);
return -1;
}
if ((new_entry & REGION_ENTRY_TYPE_MASK) != level) {
trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
return -1;
}
if (level == ASCE_TYPE_SEGMENT) {
return mmu_translate_segment(env, vaddr, asc, new_entry, raddr, flags,
rw, exc);
}
/* Check region table offset and length */
offs = (vaddr >> (28 + 11 * (level - 4) / 4)) & 3;
if (offs < ((new_entry & REGION_ENTRY_TF) >> 6)
|| offs > (new_entry & REGION_ENTRY_LENGTH)) {
DPRINTF("%s: invalid offset or len (%lx)\n", __func__, new_entry);
trigger_page_fault(env, vaddr, pchks[level / 4 - 1], asc, rw, exc);
return -1;
}
if ((env->cregs[0] & CR0_EDAT) && (new_entry & REGION_ENTRY_RO)) {
*flags &= ~PAGE_WRITE;
}
/* yet another region */
return mmu_translate_region(env, vaddr, asc, new_entry, level - 4,
raddr, flags, rw, exc);
*entry = be64_to_cpu(*entry);
return true;
}
static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t asce, target_ulong *raddr,
int *flags, int rw, bool exc)
int *flags, int rw)
{
int level;
int r;
const bool edat1 = (env->cregs[0] & CR0_EDAT) &&
s390_has_feat(S390_FEAT_EDAT);
const bool edat2 = edat1 && s390_has_feat(S390_FEAT_EDAT_2);
const bool iep = (env->cregs[0] & CR0_IEP) &&
s390_has_feat(S390_FEAT_INSTRUCTION_EXEC_PROT);
const int asce_tl = asce & ASCE_TABLE_LENGTH;
const int asce_p = asce & ASCE_PRIVATE_SPACE;
hwaddr gaddr = asce & ASCE_ORIGIN;
uint64_t entry;
if (asce & ASCE_REAL_SPACE) {
/* direct mapping */
@ -279,60 +134,158 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
return 0;
}
level = asce & ASCE_TYPE_MASK;
switch (level) {
switch (asce & ASCE_TYPE_MASK) {
case ASCE_TYPE_REGION1:
if ((vaddr >> 62) > (asce & ASCE_TABLE_LENGTH)) {
trigger_page_fault(env, vaddr, PGM_REG_FIRST_TRANS, asc, rw, exc);
return -1;
if (VADDR_REGION1_TL(vaddr) > asce_tl) {
return PGM_REG_FIRST_TRANS;
}
gaddr += VADDR_REGION1_TX(vaddr) * 8;
break;
case ASCE_TYPE_REGION2:
if (vaddr & 0xffe0000000000000ULL) {
DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
" 0xffe0000000000000ULL\n", __func__, vaddr);
trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
return -1;
if (VADDR_REGION1_TX(vaddr)) {
return PGM_ASCE_TYPE;
}
if ((vaddr >> 51 & 3) > (asce & ASCE_TABLE_LENGTH)) {
trigger_page_fault(env, vaddr, PGM_REG_SEC_TRANS, asc, rw, exc);
return -1;
if (VADDR_REGION2_TL(vaddr) > asce_tl) {
return PGM_REG_SEC_TRANS;
}
gaddr += VADDR_REGION2_TX(vaddr) * 8;
break;
case ASCE_TYPE_REGION3:
if (VADDR_REGION1_TX(vaddr) || VADDR_REGION2_TX(vaddr)) {
return PGM_ASCE_TYPE;
}
if (VADDR_REGION3_TL(vaddr) > asce_tl) {
return PGM_REG_THIRD_TRANS;
}
gaddr += VADDR_REGION3_TX(vaddr) * 8;
break;
case ASCE_TYPE_SEGMENT:
if (VADDR_REGION1_TX(vaddr) || VADDR_REGION2_TX(vaddr) ||
VADDR_REGION3_TX(vaddr)) {
return PGM_ASCE_TYPE;
}
if (VADDR_SEGMENT_TL(vaddr) > asce_tl) {
return PGM_SEGMENT_TRANS;
}
gaddr += VADDR_SEGMENT_TX(vaddr) * 8;
break;
}
switch (asce & ASCE_TYPE_MASK) {
case ASCE_TYPE_REGION1:
if (!read_table_entry(env, gaddr, &entry)) {
return PGM_ADDRESSING;
}
if (entry & REGION_ENTRY_I) {
return PGM_REG_FIRST_TRANS;
}
if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION1) {
return PGM_TRANS_SPEC;
}
if (VADDR_REGION2_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 ||
VADDR_REGION2_TL(vaddr) > (entry & REGION_ENTRY_TL)) {
return PGM_REG_SEC_TRANS;
}
if (edat1 && (entry & REGION_ENTRY_P)) {
*flags &= ~PAGE_WRITE;
}
gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_REGION2_TX(vaddr) * 8;
/* fall through */
case ASCE_TYPE_REGION2:
if (!read_table_entry(env, gaddr, &entry)) {
return PGM_ADDRESSING;
}
if (entry & REGION_ENTRY_I) {
return PGM_REG_SEC_TRANS;
}
if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION2) {
return PGM_TRANS_SPEC;
}
if (VADDR_REGION3_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 ||
VADDR_REGION3_TL(vaddr) > (entry & REGION_ENTRY_TL)) {
return PGM_REG_THIRD_TRANS;
}
if (edat1 && (entry & REGION_ENTRY_P)) {
*flags &= ~PAGE_WRITE;
}
gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_REGION3_TX(vaddr) * 8;
/* fall through */
case ASCE_TYPE_REGION3:
if (!read_table_entry(env, gaddr, &entry)) {
return PGM_ADDRESSING;
}
if (entry & REGION_ENTRY_I) {
return PGM_REG_THIRD_TRANS;
}
if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION3) {
return PGM_TRANS_SPEC;
}
if (edat2 && (entry & REGION3_ENTRY_CR) && asce_p) {
return PGM_TRANS_SPEC;
}
if (edat1 && (entry & REGION_ENTRY_P)) {
*flags &= ~PAGE_WRITE;
}
if (edat2 && (entry & REGION3_ENTRY_FC)) {
if (iep && (entry & REGION3_ENTRY_IEP)) {
*flags &= ~PAGE_EXEC;
}
*raddr = (entry & REGION3_ENTRY_RFAA) |
(vaddr & ~REGION3_ENTRY_RFAA);
return 0;
}
if (VADDR_SEGMENT_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 ||
VADDR_SEGMENT_TL(vaddr) > (entry & REGION_ENTRY_TL)) {
return PGM_SEGMENT_TRANS;
}
gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_SEGMENT_TX(vaddr) * 8;
/* fall through */
case ASCE_TYPE_SEGMENT:
if (!read_table_entry(env, gaddr, &entry)) {
return PGM_ADDRESSING;
}
if (entry & SEGMENT_ENTRY_I) {
return PGM_SEGMENT_TRANS;
}
if ((entry & SEGMENT_ENTRY_TT) != SEGMENT_ENTRY_TT_SEGMENT) {
return PGM_TRANS_SPEC;
}
if ((entry & SEGMENT_ENTRY_CS) && asce_p) {
return PGM_TRANS_SPEC;
}
if (entry & SEGMENT_ENTRY_P) {
*flags &= ~PAGE_WRITE;
}
if (edat1 && (entry & SEGMENT_ENTRY_FC)) {
if (iep && (entry & SEGMENT_ENTRY_IEP)) {
*flags &= ~PAGE_EXEC;
}
*raddr = (entry & SEGMENT_ENTRY_SFAA) |
(vaddr & ~SEGMENT_ENTRY_SFAA);
return 0;
}
gaddr = (entry & SEGMENT_ENTRY_ORIGIN) + VADDR_PAGE_TX(vaddr) * 8;
break;
}
if (!read_table_entry(env, gaddr, &entry)) {
return PGM_ADDRESSING;
}
if (entry & PAGE_ENTRY_I) {
return PGM_PAGE_TRANS;
}
if (entry & PAGE_ENTRY_0) {
return PGM_TRANS_SPEC;
}
if (entry & PAGE_ENTRY_P) {
*flags &= ~PAGE_WRITE;
}
if (iep && (entry & PAGE_ENTRY_IEP)) {
*flags &= ~PAGE_EXEC;
}
*raddr = entry & TARGET_PAGE_MASK;
return 0;
}
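The VADDR_*_TX (table index) and VADDR_*_TL (table-length compare) macros used above are defined earlier in mmu_helper.c and are not part of this hunk. As a standalone illustration of the DAT address layout they encode (11-bit region-first/second/third and segment indices, an 8-bit page index, a 12-bit byte offset), with hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical equivalents of the VADDR_*_TX macros. */
static uint64_t region1_tx(uint64_t va) { return (va >> 53) & 0x7ff; }
static uint64_t region2_tx(uint64_t va) { return (va >> 42) & 0x7ff; }
static uint64_t region3_tx(uint64_t va) { return (va >> 31) & 0x7ff; }
static uint64_t segment_tx(uint64_t va) { return (va >> 20) & 0x7ff; }
static uint64_t page_tx(uint64_t va)    { return (va >> 12) & 0xff; }

int main(void)
{
    uint64_t va = 0x0000123456789abcULL;

    /* Each index selects an 8-byte entry of its table, which is why the
     * walk above advances gaddr by "index * 8" at every level. */
    printf("R1=%llu R2=%llu R3=%llu SX=%llu PX=%llu offset=0x%llx\n",
           (unsigned long long)region1_tx(va), (unsigned long long)region2_tx(va),
           (unsigned long long)region3_tx(va), (unsigned long long)segment_tx(va),
           (unsigned long long)page_tx(va), (unsigned long long)(va & 0xfff));
    return 0;
}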
static void mmu_handle_skey(target_ulong addr, int rw, int *flags)
@ -412,16 +365,18 @@ static void mmu_handle_skey(target_ulong addr, int rw, int *flags)
* @param raddr the translated address is stored to this pointer
* @param flags the PAGE_READ/WRITE/EXEC flags are stored to this pointer
* @param tec the translation exception code is stored to this pointer
* @return 0 = success, != 0, the exception to raise
*/
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
target_ulong *raddr, int *flags, uint64_t *tec)
{
uint64_t asce;
int r;
*tec = (vaddr & TARGET_PAGE_MASK) | (asc >> 46) |
(rw == MMU_DATA_STORE ? FS_WRITE : FS_READ);
*flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
if (is_low_address(vaddr & TARGET_PAGE_MASK) && lowprot_enabled(env, asc)) {
/*
* If any part of this page is currently protected, make sure the
@ -433,10 +388,9 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
*/
*flags |= PAGE_WRITE_INV;
if (is_low_address(vaddr) && rw == MMU_DATA_STORE) {
/* LAP sets bit 56 */
*tec |= 0x80;
return PGM_PROTECTION;
}
}
@ -449,15 +403,12 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
switch (asc) {
case PSW_ASC_PRIMARY:
PTE_DPRINTF("%s: asc=primary\n", __func__);
asce = env->cregs[1];
break;
case PSW_ASC_HOME:
PTE_DPRINTF("%s: asc=home\n", __func__);
asce = env->cregs[13];
break;
case PSW_ASC_SECONDARY:
PTE_DPRINTF("%s: asc=secondary\n", __func__);
asce = env->cregs[7];
break;
case PSW_ASC_ACCREG:
@ -467,11 +418,25 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
}
/* perform the DAT translation */
r = mmu_translate_asce(env, vaddr, asc, asce, raddr, flags, rw);
if (unlikely(r)) {
return r;
}
/* check for DAT protection */
if (unlikely(rw == MMU_DATA_STORE && !(*flags & PAGE_WRITE))) {
/* DAT sets bit 61 only */
*tec |= 0x4;
return PGM_PROTECTION;
}
/* check for Instruction-Execution-Protection */
if (unlikely(rw == MMU_INST_FETCH && !(*flags & PAGE_EXEC))) {
/* IEP sets bit 56 and 61 */
*tec |= 0x84;
return PGM_PROTECTION;
}
nodat:
/* Convert real address -> absolute address */
*raddr = mmu_real2abs(env, *raddr);
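The translation-exception code built above combines the failing page address, the ASC shifted into bits 62-63, a fetch/store indication, and cause bits numbered MSB-first as in the PoP, so bit 56 corresponds to 0x80 and bit 61 to 0x4. A standalone sketch of that bit arithmetic (the FS_READ/FS_WRITE values and the page mask are stand-ins, not the QEMU definitions):

#include <stdint.h>
#include <stdio.h>

#define FS_READ    0x800ULL               /* assumed value, for illustration */
#define FS_WRITE   0x400ULL               /* assumed value, for illustration */
#define TEC_BIT(n) (1ULL << (63 - (n)))   /* PoP numbers bits 0..63 MSB-first */

int main(void)
{
    uint64_t vaddr = 0x000000007fffe123ULL;
    uint64_t tec;

    tec = (vaddr & ~0xfffULL) | FS_WRITE;  /* failing page + store indication */
    tec |= TEC_BIT(56) | TEC_BIT(61);      /* cause bits: the 0x80 and 0x4 above */
    printf("tec=0x%016llx\n", (unsigned long long)tec);
    return 0;
}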
@ -486,22 +451,22 @@ nodat:
* the MEMOP interface.
*/
static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
target_ulong *pages, bool is_write, uint64_t *tec)
{
uint64_t asc = cpu->env.psw.mask & PSW_MASK_ASC;
CPUS390XState *env = &cpu->env;
int ret, i, pflags;
for (i = 0; i < nr_pages; i++) {
ret = mmu_translate(env, addr, is_write, asc, &pages[i], &pflags, tec);
if (ret) {
return ret;
}
if (!address_space_access_valid(&address_space_memory, pages[i],
TARGET_PAGE_SIZE, is_write,
MEMTXATTRS_UNSPECIFIED)) {
*tec = 0; /* unused */
return PGM_ADDRESSING;
}
addr += TARGET_PAGE_SIZE;
}
@ -529,6 +494,7 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
{
int currlen, nr_pages, i;
target_ulong *pages;
uint64_t tec;
int ret;
if (kvm_enabled()) {
@ -542,8 +508,10 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
+ 1;
pages = g_malloc(nr_pages * sizeof(*pages));
ret = translate_pages(cpu, laddr, nr_pages, pages, is_write, &tec);
if (ret) {
trigger_access_exception(&cpu->env, ret, tec);
} else if (hostbuf != NULL) {
/* Copy data by stepping through the area page by page */
for (i = 0; i < nr_pages; i++) {
currlen = MIN(len, TARGET_PAGE_SIZE - (laddr % TARGET_PAGE_SIZE));
@ -575,10 +543,10 @@ void s390_cpu_virt_mem_handle_exc(S390CPU *cpu, uintptr_t ra)
* @param rw 0 = read, 1 = write, 2 = code fetch
* @param addr the translated address is stored to this pointer
* @param flags the PAGE_READ/WRITE/EXEC flags are stored to this pointer
* @return 0 = success, != 0, the exception to raise
*/
int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
target_ulong *addr, int *flags, uint64_t *tec)
{
const bool lowprot_enabled = env->cregs[0] & CR0_LOWPROT;
@ -587,8 +555,11 @@ int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
/* see comment in mmu_translate() how this works */
*flags |= PAGE_WRITE_INV;
if (is_low_address(raddr) && rw == MMU_DATA_STORE) {
/* LAP sets bit 56 */
*tec = (raddr & TARGET_PAGE_MASK)
| (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ)
| 0x80;
return PGM_PROTECTION;
}
}
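Both mmu_translate() and mmu_translate_real() gate this check on is_low_address(), defined earlier in mmu_helper.c. Its presumed shape, following the PoP definition of low-address protection covering bytes 0-511 and 4096-4607 of each prefix area (a sketch, not the committed code):

static inline bool is_low_address(uint64_t addr)
{
    return addr <= 511 || (addr >= 4096 && addr <= 4607);
}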

View File

@ -18,8 +18,8 @@
void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
{
}
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
uint32_t code, uintptr_t ra)
{
g_assert_not_reached();
}

View File

@ -14,8 +14,8 @@
#define TCG_S390X_H
void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque);
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
uint32_t code, uintptr_t ra);
void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
uintptr_t ra);
void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,

View File

@ -6309,6 +6309,9 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
/* Search for the insn in the table. */
insn = extract_insn(env, s, &f);
/* Emit insn_start now that we know the ILEN. */
tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);
/* Not found means unimplemented/illegal opcode. */
if (insn == NULL) {
qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
@ -6463,9 +6466,6 @@ static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
@ -6473,6 +6473,14 @@ static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
/*
* Emit an insn_start to accompany the breakpoint exception.
* The ILEN value is a dummy, since this does not result in
* an s390x exception, but an internal qemu exception which
* brings us back to interact with the gdbstub.
*/
tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);
dc->base.is_jmp = DISAS_PC_STALE;
dc->do_debug = true;
/* The address covered by the breakpoint must be included in
@ -6567,8 +6575,14 @@ void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
target_ulong *data)
{
int cc_op = data[1];
env->psw.addr = data[0];
/* Update the CC opcode if it is not already up-to-date. */
if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
env->cc_op = cc_op;
}
/* Record ILEN. */
env->int_pgm_ilen = data[2];
}
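The three-argument tcg_gen_insn_start() calls and the data[2] access above imply that the target now records two extra words of unwind data per instruction. The presumed counterpart in target/s390x/cpu.h, not shown in this diff:

/* Presumed: cc_op and ilen accompany the PC in the unwind data, so
 * restore_state_to_opc() can read them back as data[1] and data[2]. */
#define TARGET_INSN_START_EXTRA_WORDS 2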