drm/i915/gvt: move write protect handler out of mmio emulation function
It's a bit confusing that the page write-protect handler lives inside the MMIO emulation handler. Move it out into a stand-alone gvt op. Also remove the unnecessary check for write-protected page access in the MMIO read handler (write protection only traps writes, so the read path never needs the tracked-page lookup) and clean up handling of the failsafe case.

v2: rebase

Reviewed-by: Xiong Zhang <xiong.y.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
commit 4fafba2d73
parent 90551a1296
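In short, KVM's page-track write callback used to funnel writes to write-protected guest pages through full MMIO write emulation; after this patch it calls a dedicated op. A minimal sketch of the change in call path, assuming the op-table wiring from the diff below (intel_gvt_ops, emulate_mmio_write and write_protect_handler are real names from this patch; the wrapper function itself is hypothetical):

/* Hypothetical wrapper illustrating the call-path change; not part of
 * the patch itself. */
static void sketch_handle_wp_page_write(struct intel_vgpu *vgpu, u64 gpa,
					void *val, unsigned int len)
{
	/* before this patch, write-protect faults took the generic path:
	 *   intel_gvt_ops->emulate_mmio_write(vgpu, gpa, val, len);
	 * after it, they go straight to the dedicated handler: */
	intel_gvt_ops->write_protect_handler(vgpu, gpa, val, len);
}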
drivers/gpu/drm/i915/gvt/gtt.c
@@ -1968,6 +1968,39 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	return ret;
 }
 
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+		void *p_data, unsigned int bytes)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	int ret = 0;
+
+	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
+
+		mutex_lock(&gvt->lock);
+
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
+			if (unlikely(vgpu->failsafe)) {
+				/* remove write protection to prevent future traps */
+				intel_vgpu_clean_page_track(vgpu, t);
+			} else {
+				ret = t->handler(t, pa, p_data, bytes);
+				if (ret) {
+					gvt_err("guest page write error %d, "
+						"gfn 0x%lx, pa 0x%llx, "
+						"var 0x%x, len %d\n",
+						ret, t->gfn, pa,
+						*(u32 *)p_data, bytes);
+				}
+			}
+		}
+		mutex_unlock(&gvt->lock);
+	}
+	return ret;
+}
+
+
 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		intel_gvt_gtt_type_t type)
 {
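For orientation: the new function takes gvt->lock, looks up the tracked page by guest frame number, and either dispatches to the per-page handler or, in failsafe mode, simply drops the write protection so the page stops trapping. A rough sketch of what such a per-page handler looks like; the signature is inferred from the t->handler(t, pa, p_data, bytes) call site above and the body is entirely hypothetical:

/* Hypothetical page-track handler; signature inferred from the call site
 * in intel_vgpu_write_protect_handler() above, body illustrative only. */
static int sketch_page_track_handler(struct intel_vgpu_page_track *t, u64 pa,
				     void *p_data, unsigned int bytes)
{
	/* a real GVT-g handler would propagate the guest's write at guest
	 * physical address 'pa' into the shadow PPGTT entry for t->gfn */
	return 0;
}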
drivers/gpu/drm/i915/gvt/gtt.h
@@ -308,4 +308,7 @@ int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
 int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
 	unsigned int off, void *p_data, unsigned int bytes);
 
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+		void *p_data, unsigned int bytes);
+
 #endif /* _GVT_GTT_H_ */
drivers/gpu/drm/i915/gvt/gvt.c
@@ -183,6 +183,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.get_gvt_attrs = intel_get_gvt_attrs,
 	.vgpu_query_plane = intel_vgpu_query_plane,
 	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
+	.write_protect_handler = intel_vgpu_write_protect_handler,
 };
 
 /**
drivers/gpu/drm/i915/gvt/gvt.h
@@ -546,6 +546,8 @@ struct intel_gvt_ops {
 			struct attribute_group ***intel_vgpu_type_groups);
 	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
 	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
+	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
+				     unsigned int);
 };
 
 
drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1360,8 +1360,8 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			struct kvmgt_guest_info, track_node);
 
 	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
-		intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
-				(void *)val, len);
+		intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
+				(void *)val, len);
 }
 
 static void kvmgt_page_track_flush_slot(struct kvm *kvm,
drivers/gpu/drm/i915/gvt/mmio.c
@@ -117,25 +117,6 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		else
 			memcpy(pt, p_data, bytes);
 
-	} else if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-		struct intel_vgpu_page_track *t;
-
-		/* Since we enter the failsafe mode early during guest boot,
-		 * guest may not have chance to set up its ppgtt table, so
-		 * there should not be any wp pages for guest. Keep the wp
-		 * related code here in case we need to handle it in furture.
-		 */
-		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-		if (t) {
-			/* remove write protection to prevent furture traps */
-			intel_vgpu_clean_page_track(vgpu, t);
-			if (read)
-				intel_gvt_hypervisor_read_gpa(vgpu, pa,
-						p_data, bytes);
-			else
-				intel_gvt_hypervisor_write_gpa(vgpu, pa,
-						p_data, bytes);
-		}
 	}
 	mutex_unlock(&gvt->lock);
 }
@@ -168,23 +149,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		goto out;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-		struct intel_vgpu_page_track *t;
-
-		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-		if (t) {
-			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
-					p_data, bytes);
-			if (ret) {
-				gvt_vgpu_err("guest page read error %d, "
-					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-					ret, t->gfn, pa, *(u32 *)p_data,
-					bytes);
-			}
-			goto out;
-		}
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))
@@ -263,23 +227,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		goto out;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-		struct intel_vgpu_page_track *t;
-
-		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-		if (t) {
-			ret = t->handler(t, pa, p_data, bytes);
-			if (ret) {
-				gvt_err("guest page write error %d, "
-					"gfn 0x%lx, pa 0x%llx, "
-					"var 0x%x, len %d\n",
-					ret, t->gfn, pa,
-					*(u32 *)p_data, bytes);
-			}
-			goto out;
-		}
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))