KVM: x86/xen: Use gfn_to_pfn_cache for runstate area
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220303154127.202856-4-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

commit a795cd43c5 (parent 249f324933)
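This patch converts the Xen vCPU runstate area from a gfn_to_hva_cache (written with __put_user()/__copy_to_user() through a userspace HVA) to a gfn_to_pfn_cache with a kernel mapping. As a reading aid only, and not part of the patch, here is a condensed sketch of the KVM_HOST_USES_PFN access pattern the new code follows; write_runstate_bytes() is a made-up helper name, while the kvm_gfn_to_pfn_cache_*() calls and gpc fields are the ones that appear in the diff below.

#include <linux/kvm_host.h>

/* Illustrative only: check/refresh a pfn cache under its lock, then
 * write through the kernel mapping and mark the page dirty. */
static int write_runstate_bytes(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                                const void *src, size_t len, bool may_sleep)
{
        unsigned long flags;

        read_lock_irqsave(&gpc->lock, flags);
        while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, len)) {
                read_unlock_irqrestore(&gpc->lock, flags);

                /* Refreshing the mapping may fault in the page and sleep. */
                if (!may_sleep)
                        return -EWOULDBLOCK;
                if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, len))
                        return -EFAULT;

                read_lock_irqsave(&gpc->lock, flags);
        }

        /* Direct access through gpc->khva; no __copy_to_user() needed. */
        memcpy(gpc->khva, src, len);

        read_unlock_irqrestore(&gpc->lock, flags);
        mark_page_dirty_in_slot(kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
        return 0;
}

The cache itself is set up with kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa, len) and torn down with kvm_gfn_to_pfn_cache_destroy(), exactly as the attribute handler in xen.c does below.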
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -608,10 +608,9 @@ struct kvm_vcpu_xen {
         u32 current_runstate;
         bool vcpu_info_set;
         bool vcpu_time_info_set;
-        bool runstate_set;
         struct gfn_to_hva_cache vcpu_info_cache;
         struct gfn_to_hva_cache vcpu_time_info_cache;
-        struct gfn_to_hva_cache runstate_cache;
+        struct gfn_to_pfn_cache runstate_cache;
         u64 last_steal;
         u64 runstate_entry_time;
         u64 runstate_times[4];
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11316,6 +11316,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
         free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
         fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
 
+        kvm_xen_destroy_vcpu(vcpu);
         kvm_hv_vcpu_uninit(vcpu);
         kvm_pmu_destroy(vcpu);
         kfree(vcpu->arch.mce_banks);
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -133,27 +133,36 @@ static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
 void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 {
         struct kvm_vcpu_xen *vx = &v->arch.xen;
-        struct gfn_to_hva_cache *ghc = &vx->runstate_cache;
-        struct kvm_memslots *slots = kvm_memslots(v->kvm);
-        bool atomic = (state == RUNSTATE_runnable);
-        uint64_t state_entry_time;
-        int __user *user_state;
-        uint64_t __user *user_times;
+        struct gfn_to_pfn_cache *gpc = &vx->runstate_cache;
+        uint64_t *user_times;
+        unsigned long flags;
+        size_t user_len;
+        int *user_state;
 
         kvm_xen_update_runstate(v, state);
 
-        if (!vx->runstate_set)
+        if (!vx->runstate_cache.active)
                 return;
 
-        if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) &&
-            kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
-                return;
+        if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
+                user_len = sizeof(struct vcpu_runstate_info);
+        else
+                user_len = sizeof(struct compat_vcpu_runstate_info);
 
-        /* We made sure it fits in a single page */
-        BUG_ON(!ghc->memslot);
+        read_lock_irqsave(&gpc->lock, flags);
+        while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
+                                           user_len)) {
+                read_unlock_irqrestore(&gpc->lock, flags);
 
-        if (atomic)
-                pagefault_disable();
+                /* When invoked from kvm_sched_out() we cannot sleep */
+                if (state == RUNSTATE_runnable)
+                        return;
+
+                if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, user_len))
+                        return;
+
+                read_lock_irqsave(&gpc->lock, flags);
+        }
 
         /*
          * The only difference between 32-bit and 64-bit versions of the
@@ -167,38 +176,33 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
          */
         BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
         BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
-        user_state = (int __user *)ghc->hva;
-
         BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
-
-        user_times = (uint64_t __user *)(ghc->hva +
-                                         offsetof(struct compat_vcpu_runstate_info,
-                                                  state_entry_time));
 #ifdef CONFIG_X86_64
         BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
                      offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
         BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
                      offsetof(struct compat_vcpu_runstate_info, time) + 4);
-
-        if (v->kvm->arch.xen.long_mode)
-                user_times = (uint64_t __user *)(ghc->hva +
-                                                 offsetof(struct vcpu_runstate_info,
-                                                          state_entry_time));
 #endif
 
+        user_state = gpc->khva;
+
+        if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
+                user_times = gpc->khva + offsetof(struct vcpu_runstate_info,
+                                                  state_entry_time);
+        else
+                user_times = gpc->khva + offsetof(struct compat_vcpu_runstate_info,
+                                                  state_entry_time);
+
         /*
          * First write the updated state_entry_time at the appropriate
          * location determined by 'offset'.
          */
-        state_entry_time = vx->runstate_entry_time;
-        state_entry_time |= XEN_RUNSTATE_UPDATE;
-
         BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
-                     sizeof(state_entry_time));
+                     sizeof(user_times[0]));
         BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
-                     sizeof(state_entry_time));
+                     sizeof(user_times[0]));
 
-        if (__put_user(state_entry_time, user_times))
-                goto out;
+        user_times[0] = vx->runstate_entry_time | XEN_RUNSTATE_UPDATE;
         smp_wmb();
 
         /*
@@ -212,8 +216,7 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
         BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
                      sizeof(vx->current_runstate));
 
-        if (__put_user(vx->current_runstate, user_state))
-                goto out;
+        *user_state = vx->current_runstate;
 
         /*
          * Write the actual runstate times immediately after the
@@ -228,23 +231,19 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
         BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
                      sizeof(vx->runstate_times));
 
-        if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)))
-                goto out;
+        memcpy(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times));
         smp_wmb();
 
         /*
          * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
          * runstate_entry_time field.
          */
-        state_entry_time &= ~XEN_RUNSTATE_UPDATE;
-        __put_user(state_entry_time, user_times);
+        user_times[0] &= ~XEN_RUNSTATE_UPDATE;
         smp_wmb();
 
- out:
-        mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+        read_unlock_irqrestore(&gpc->lock, flags);
 
-        if (atomic)
-                pagefault_enable();
+        mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
 }
 
 int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
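The smp_wmb() calls and the XEN_RUNSTATE_UPDATE bit in the function above implement the usual Xen runstate publication protocol: the update bit is set in state_entry_time, then the state and the times are rewritten, then the bit is cleared, with each step ordered by a write barrier. For illustration only (not part of the patch, and modelled loosely on how Linux guests snapshot the area), a guest-side reader retries until it observes a consistent copy with the bit clear:

#include <xen/interface/vcpu.h>   /* struct vcpu_runstate_info, XEN_RUNSTATE_UPDATE */

/* Illustrative guest-side counterpart of the writer above. */
static void read_runstate(volatile struct vcpu_runstate_info *rs,
                          struct vcpu_runstate_info *snap)
{
        uint64_t entry_time;

        do {
                entry_time = rs->state_entry_time;
                smp_rmb();              /* pairs with the writer's smp_wmb() */
                *snap = *(struct vcpu_runstate_info *)rs;
                smp_rmb();
        } while ((entry_time & XEN_RUNSTATE_UPDATE) ||
                 entry_time != rs->state_entry_time);

        snap->state_entry_time &= ~XEN_RUNSTATE_UPDATE;
}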
@@ -507,24 +506,16 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                         break;
                 }
                 if (data->u.gpa == GPA_INVALID) {
-                        vcpu->arch.xen.runstate_set = false;
+                        kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
+                                                     &vcpu->arch.xen.runstate_cache);
                         r = 0;
                         break;
                 }
 
-                /* It must fit within a single page */
-                if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) {
-                        r = -EINVAL;
-                        break;
-                }
-
-                r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
+                r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
                                               &vcpu->arch.xen.runstate_cache,
-                                              data->u.gpa,
+                                              NULL, KVM_HOST_USES_PFN, data->u.gpa,
                                               sizeof(struct vcpu_runstate_info));
-                if (!r) {
-                        vcpu->arch.xen.runstate_set = true;
-                }
                 break;
 
         case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
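For reference, the GPA handled by this attribute is registered from userspace via the KVM_XEN_VCPU_SET_ATTR ioctl. A minimal sketch, not part of the patch (set_runstate_gpa() is a made-up helper and error handling is omitted):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_runstate_gpa(int vcpu_fd, __u64 gpa)
{
        struct kvm_xen_vcpu_attr attr = {
                .type  = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
                .u.gpa = gpa,   /* the runstate struct must fit in one page */
        };

        return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr);
}

Setting the attribute back to GPA_INVALID tears the mapping down again, which is what the new kvm_gfn_to_pfn_cache_destroy() call above handles.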
@@ -659,7 +650,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                         r = -EOPNOTSUPP;
                         break;
                 }
-                if (vcpu->arch.xen.runstate_set) {
+                if (vcpu->arch.xen.runstate_cache.active) {
                         data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
                         r = 0;
                 }
@@ -1056,3 +1047,9 @@ int kvm_xen_setup_evtchn(struct kvm *kvm,
 
         return 0;
 }
+
+void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
+{
+        kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
+                                     &vcpu->arch.xen.runstate_cache);
+}
--- a/arch/x86/kvm/xen.h
+++ b/arch/x86/kvm/xen.h
@@ -23,7 +23,7 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
 int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
 void kvm_xen_init_vm(struct kvm *kvm);
 void kvm_xen_destroy_vm(struct kvm *kvm);
-
+void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
 int kvm_xen_set_evtchn_fast(struct kvm_kernel_irq_routing_entry *e,
                             struct kvm *kvm);
 int kvm_xen_setup_evtchn(struct kvm *kvm,
@@ -65,6 +65,10 @@ static inline void kvm_xen_destroy_vm(struct kvm *kvm)
 {
 }
 
+static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
+{
+}
+
 static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
 {
         return false;