Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2025-01-26 15:54:18 +08:00), commit 45e96ea6b3.

Commit message:

When the guest accesses I/O memory this will create data abort exceptions,
and they are handled by decoding the HSR information (physical address,
read/write, length, register) and forwarding reads and writes to QEMU,
which performs the device emulation. Certain classes of load/store
operations do not support the syndrome information provided in the HSR.
We don't support decoding these (patches are available elsewhere), so we
report an error to user space in this case. This requires changing the
general flow somewhat, since new calls to run the VCPU must check if
there's a pending MMIO load and perform the write after userspace has
made the data available.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>

159 lines · 4.3 KiB · C
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
|
#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__

#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>

/* Maximum number of VCPUs per VM comes from Kconfig. */
#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
/* User-visible and kernel-internal memslot counts for this arch. */
#define KVM_MEMORY_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
/* Coalesced MMIO ring lives one page past the kvm_run page. */
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
/* This arch implements the KVM_{GET,SET}_ONE_REG vcpu ioctls. */
#define KVM_HAVE_ONE_REG

/* No optional vcpu features are selectable via KVM_ARM_VCPU_INIT yet. */
#define KVM_VCPU_MAX_FEATURES 0

/* We don't currently support large pages. */
#define KVM_HPAGE_GFN_SHIFT(x) 0
#define KVM_NR_PAGE_SIZES 1
#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
|
struct kvm_vcpu;

/* Return a pointer to the vcpu's copy of core register reg_num for mode. */
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
/* Identify which KVM_ARM_TARGET_* the host CPU matches (negative on error). */
int kvm_target_cpu(void);
/* Reset vcpu state to its power-on/init defaults. */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
/* Reset the cp15 coprocessor register state of a vcpu. */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
|
/* Per-VM architecture state. */
struct kvm_arch {
	/* VTTBR value associated with below pgd and vmid */
	u64 vttbr;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/* The VMID generation used for the virt. memory system */
	u64 vmid_gen;
	u32 vmid;

	/* Stage-2 page table */
	pgd_t *pgd;
};
|
|
/* Capacity of the per-vcpu preallocated MMU page cache below. */
#define KVM_NR_MEM_OBJS 40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;			/* number of valid entries in objects[] */
	void *objects[KVM_NR_MEM_OBJS];	/* preallocated pages, consumed under lock */
};
|
/*
 * Per-VCPU architecture state.
 * NOTE(review): field layout appears to be consumed from assembly (see the
 * "used directly from assembly code" comment below) — do not reorder.
 */
struct kvm_vcpu_arch {
	struct kvm_regs regs;	/* guest core register file */

	int target; /* Processor target */
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* System control coprocessor (cp15) */
	u32 cp15[NR_CP15_REGS];

	/* The CPU type we expose to the VM */
	u32 midr;

	/* Exception Information */
	u32 hsr;		/* Hyp Syndrome Register */
	u32 hxfar;		/* Hyp Data/Inst Fault Address Register */
	u32 hpfar;		/* Hyp IPA Fault Address Register */

	/* Floating point registers (VFP and Advanced SIMD/NEON) */
	struct vfp_hard_struct vfp_guest;
	struct vfp_hard_struct *vfp_host;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */
	/* dcache set/way operation pending */
	int last_pcpu;
	cpumask_t require_dcache_flush;

	/* IO related fields */
	struct kvm_decode mmio_decode;

	/* Interrupt related fields */
	u32 irq_lines;		/* IRQ and FIQ levels */

	/* Hyp exception information */
	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Detect first run of a vcpu */
	bool has_run_once;
};
|
|
/* Per-VM statistics exported through the generic KVM stats machinery. */
struct kvm_vm_stat {
	u32 remote_tlb_flush;
};

/* Per-VCPU statistics exported through the generic KVM stats machinery. */
struct kvm_vcpu_stat {
	u32 halt_wakeup;
};
|
|
struct kvm_vcpu_init;
|
|
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
|
|
const struct kvm_vcpu_init *init);
|
|
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
|
|
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
|
|
struct kvm_one_reg;
|
|
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
|
|
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
|
|
u64 kvm_call_hyp(void *hypfn, ...);
|
|
void force_vm_exit(const cpumask_t *mask);
|
|
|
|
#define KVM_ARCH_WANT_MMU_NOTIFIER
|
|
struct kvm;
|
|
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
|
|
int kvm_unmap_hva_range(struct kvm *kvm,
|
|
unsigned long start, unsigned long end);
|
|
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
|
|
|
|
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
|
|
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
|
|
|
|
/* We do not have shadow page tables, hence the empty hooks */
static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* No shadow page tables to scan: never report the page as accessed. */
	return 0;
}
|
/* MMU-notifier test-age hook: no shadow page tables, so nothing to test. */
static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* Always "not young" — there is no access tracking to consult. */
	return 0;
}
|
#endif /* __ARM_KVM_HOST_H__ */
|