// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include <asm/tm.h>
#include "book3s.h"
#include <asm/asm-prototypes.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915
#define OP_31_XOP_SLBFEE	979

#define OP_31_XOP_TBEGIN	654
#define OP_31_XOP_TABORT	910

#define OP_31_XOP_TRECLAIM	942
#define OP_31_XOP_TRCHKPT	1006

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

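/*
 * Graphics Quantization Registers, used by the Gekko/Broadway paired
 * single support emulated further down. Defined locally here since they
 * are specific to those cores.
 */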
#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

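/*
 * Gate SPR access on the privilege level the SPR requires: PAPR guests
 * never get hypervisor SPRs, and problem-state code only gets the
 * problem-state set.
 */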
static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
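/*
 * Copy the current register values into the vcpu's checkpointed-state
 * fields, mirroring what a real trecheckpoint would capture;
 * kvmppc_copyfrom_vcpu_tm() below is the inverse, used on treclaim.
 */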
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
			sizeof(vcpu->arch.gpr_tm));
	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}

static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
			sizeof(vcpu->arch.regs.gpr));
	memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

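/*
 * Emulate treclaim.: pull the checkpointed state back into the live
 * registers, record the failure cause in TEXASR/TFIAR if the Failure
 * Summary bit was not already set, and leave the guest in
 * non-transactional state.
 */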
static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	int fc_val = ra_val ? ra_val : 1;
	uint64_t texasr;

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	preempt_disable();
	tm_enable();
	texasr = mfspr(SPRN_TEXASR);
	kvmppc_save_tm_pr(vcpu);
	kvmppc_copyfrom_vcpu_tm(vcpu);

	/* Failure recording depends on the Failure Summary bit */
	if (!(texasr & TEXASR_FS)) {
		texasr &= ~TEXASR_FC;
		texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS;

		texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (kvmppc_get_msr(vcpu) & MSR_PR)
			texasr |= TEXASR_PR;

		if (kvmppc_get_msr(vcpu) & MSR_HV)
			texasr |= TEXASR_HV;

		vcpu->arch.texasr = texasr;
		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
		mtspr(SPRN_TEXASR, texasr);
		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	}
	tm_disable();
	/*
	 * treclaim needs to quit to non-transactional state.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	kvmppc_set_msr(vcpu, guest_msr);
	preempt_enable();

	if (vcpu->arch.shadow_fscr & FSCR_TAR)
		mtspr(SPRN_TAR, vcpu->arch.tar);
}

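/*
 * Emulate trecheckpoint.: load the checkpointed state saved in the vcpu
 * back into the TM checkpoint and put the guest into suspended state.
 */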
static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);

	preempt_disable();
	/*
	 * We need to flush FP/VEC/VSX to the vcpu save area before
	 * copying.
	 */
	kvmppc_giveup_ext(vcpu, MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_copyto_vcpu_tm(vcpu);
	kvmppc_save_tm_sprs(vcpu);

	/*
	 * As a result of trecheckpoint, set TS to suspended.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	guest_msr |= MSR_TS_S;
	kvmppc_set_msr(vcpu, guest_msr);
	kvmppc_restore_tm_pr(vcpu);
	preempt_enable();
}

/* emulate tabort. in guest privileged state */
void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
{
	/*
	 * Currently we only emulate tabort., with no emulation of other
	 * tabort variants, since there is no kernel usage of them at
	 * present.
	 */
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	uint64_t org_texasr;

	preempt_disable();
	tm_enable();
	org_texasr = mfspr(SPRN_TEXASR);
	tm_abort(ra_val);

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	/*
	 * Failure recording depends on the Failure Summary bit, and
	 * tabort is treated as a nop in non-transactional state.
	 */
	if (!(org_texasr & TEXASR_FS) &&
			MSR_TM_ACTIVE(guest_msr)) {
		vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (guest_msr & MSR_PR)
			vcpu->arch.texasr |= TEXASR_PR;

		if (guest_msr & MSR_HV)
			vcpu->arch.texasr |= TEXASR_HV;

		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
	}
	tm_disable();
	preempt_enable();
}

#endif

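/*
 * Main instruction emulation entry point for PR KVM: decode the trapped
 * instruction and emulate the privileged or problematic ones; anything
 * still unhandled falls through to the paired single emulation at the
 * end of the function.
 */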
int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;

	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up in
			 * illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI: {
			unsigned long srr1 = kvmppc_get_srr1(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
			unsigned long cur_msr = kvmppc_get_msr(vcpu);

			/*
			 * Follow the ISA rules for TM state transitions: with
			 * TM disabled while in suspended state, a transition
			 * to the TM inactive (00) state should be suppressed,
			 * so keep TS set to suspended in that case.
			 */
			if (((cur_msr & MSR_TM) == 0) &&
				((srr1 & MSR_TM) == 0) &&
				MSR_TM_SUSPENDED(cur_msr) &&
				!MSR_TM_ACTIVE(srr1))
				srr1 |= MSR_TS_S;
#endif
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
			kvmppc_set_msr(vcpu, srr1);
			*advance = 0;
			break;
		}

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
			if (inst & 0x10000) {
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			vcpu->run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				vcpu->run->papr_hcall.args[i] = gpr;
			}

			vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBFEE:
			if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
				return EMULATE_FAIL;
			} else {
				ulong b, t;
				ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK;

				b = kvmppc_get_gpr(vcpu, rb);
				if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
					cr |= 2 << CR0_SHIFT;
				kvmppc_set_gpr(vcpu, rt, t);
				/* copy XER[SO] bit to CR0[SO] */
				cr |= (vcpu->arch.regs.xer & 0x80000000) >>
					(31 - CR0_SHIFT);
				kvmppc_set_cr(vcpu, cr);
			}
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
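		/*
		 * dcbz emulation: the guest's dcbz is patched to the reserved
		 * opcode 1010 (see OP_31_XOP_DCBZ above) so that it traps,
		 * and we emulate it by storing a line of 32 zero bytes. On a
		 * translation or permission fault we hand the guest a data
		 * storage interrupt instead.
		 */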
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
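		/*
		 * The TM instruction cases below emulate tbegin., tabort.,
		 * treclaim. and trecheckpoint for the guest; each first
		 * checks that the host CPU has TM and that the guest has
		 * MSR[TM] set, raising a facility unavailable interrupt
		 * otherwise.
		 */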
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		case OP_31_XOP_TBEGIN:
		{
			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
				preempt_disable();
				vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
				  (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));

				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
					(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
						 << TEXASR_FC_LG));

				if ((inst >> 21) & 0x1)
					vcpu->arch.texasr |= TEXASR_ROT;

				if (kvmppc_get_msr(vcpu) & MSR_HV)
					vcpu->arch.texasr |= TEXASR_HV;

				vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
				vcpu->arch.tfiar = kvmppc_get_pc(vcpu);

				kvmppc_restore_tm_sprs(vcpu);
				preempt_enable();
			} else
				emulated = EMULATE_FAIL;
			break;
		}
		case OP_31_XOP_TABORT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/*
			 * Only emulate for a privileged guest: a problem
			 * state guest can run with TM enabled and we don't
			 * expect to trap here in that case.
			 */
			WARN_ON(guest_msr & MSR_PR);

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			kvmppc_emulate_tabort(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRECLAIM:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupts based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!MSR_TM_ACTIVE(guest_msr)) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);
			kvmppc_emulate_treclaim(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRCHKPT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long texasr;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupt based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Intr */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			tm_enable();
			texasr = mfspr(SPRN_TEXASR);
			tm_disable();

			if (MSR_TM_ACTIVE(guest_msr) ||
				!(texasr & (TEXASR_FS))) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			kvmppc_emulate_trchkpt(vcpu);
			break;
		}
#endif
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(vcpu);

	return emulated;
}

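/*
 * Split a 32-bit write to one half of a BAT pair into its architected
 * fields: the upper word carries BEPI, the block length and Vs/Vp, the
 * lower word carries BRPN, WIMG and PP. The raw 64-bit value is kept
 * alongside for mfspr emulation.
 */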
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
		    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

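/*
 * Map an SPR number onto the vcpu's shadow IBAT/DBAT array entry. The
 * upper and lower registers of one BAT pair share an entry, hence the
 * division by two.
 */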
static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}

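/*
 * Emulate mtspr for PR KVM. Writes either land in the shadow state kept
 * in the vcpu/book3s structs, are forwarded to the real SPR (the TM
 * registers), or are silently ignored for SPRs we don't model.
 */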
int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/*
		 * BAT writes happen so rarely that we're ok to flush
		 * everything here.
		 */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* Setting guest HID5 can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		kvmppc_set_fscr(vcpu, spr_val);
		break;
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
			!((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
					(sprn == SPRN_TFHAR))) {
			/*
			 * It is illegal to mtspr() TM regs in any state other
			 * than non-transactional, with the exception of TFHAR
			 * in suspended state.
			 */
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			mtspr(SPRN_TFHAR, spr_val);
		else if (sprn == SPRN_TEXASR)
			mtspr(SPRN_TEXASR, spr_val);
		else
			mtspr(SPRN_TFIAR, spr_val);
		tm_disable();

		break;
#endif
#endif
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_UAMOR:
	case SPRN_IAMR:
	case SPRN_AMR:
#endif
		break;
unprivileged:
	default:
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}

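/*
 * Emulate mfspr for PR KVM: read back the shadow state, read the real
 * TM SPRs under tm_enable(), or return 0 for the SPRs whose writes we
 * simply discard above.
 */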
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		/*
		 * On exit we would have updated purr
		 */
		*spr_val = vcpu->arch.purr;
		break;
	case SPRN_SPURR:
		/*
		 * On exit we would have updated spurr
		 */
		*spr_val = vcpu->arch.spurr;
		break;
	case SPRN_VTB:
		*spr_val = to_book3s(vcpu)->vtb;
		break;
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			*spr_val = mfspr(SPRN_TFHAR);
		else if (sprn == SPRN_TEXASR)
			*spr_val = mfspr(SPRN_TEXASR);
		else if (sprn == SPRN_TFIAR)
			*spr_val = mfspr(SPRN_TFIAR);
		tm_disable();
		break;
#endif
#endif
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_TIR:
	case SPRN_UAMOR:
	case SPRN_IAMR:
	case SPRN_AMR:
#endif
		*spr_val = 0;
		break;
	default:
unprivileged:
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}

		break;
	}

	return emulated;
}

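/*
 * Helpers for alignment interrupt emulation: reconstruct the DSISR and
 * DAR values a real alignment interrupt would have produced for the
 * faulting instruction.
 */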
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}

ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so we can
	 * assume the same here.
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}