From 493f30cd086e5250e77f1ff201d22872a46e83e4 Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Wed, 23 Dec 2020 11:44:30 +0800 Subject: [PATCH 01/11] drm/i915/gvt: parse init context to update cmd accessible reg whitelist Logical Context is actually a big batch buffer consisting of multiple LRI commands + saved registers. It comprises Ring Context (the first 0x50 dwords) and Engine Context. The registers defined in Engine Context are command accessible, and safe to execute in VM Context. However, not all of them are currently tracked in existing register whitelist. Here we kick hardware to generate a dummy Engine Context and then scan the dummy Engine context to update whitelist dynamically. Based on updated list, later patches will audit future VM Engine Contexts to disallow undesired LRIs (if out of what hardware generates). Cc: Kevin Tian Signed-off-by: Wang Zhi Signed-off-by: Yan Zhao Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201223034430.16934-1-yan.y.zhao@intel.com Reviewed-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 142 +++++++++++++++++++++++++- drivers/gpu/drm/i915/gvt/cmd_parser.h | 2 + drivers/gpu/drm/i915/gvt/gvt.h | 4 + drivers/gpu/drm/i915/gvt/vgpu.c | 4 +- 4 files changed, 148 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 3fea967ee817..2e6d46246f76 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -38,11 +38,17 @@ #include "i915_drv.h" #include "gt/intel_gpu_commands.h" +#include "gt/intel_lrc.h" #include "gt/intel_ring.h" +#include "gt/intel_gt_requests.h" #include "gvt.h" #include "i915_pvinfo.h" #include "trace.h" +#include "gem/i915_gem_context.h" +#include "gem/i915_gem_pm.h" +#include "gt/intel_context.h" + #define INVALID_OP (~0U) #define OP_LEN_MI 9 @@ -455,6 +461,7 @@ enum { RING_BUFFER_INSTRUCTION, BATCH_BUFFER_INSTRUCTION, BATCH_BUFFER_2ND_LEVEL, + RING_BUFFER_CTX, }; enum { @@ -496,6 +503,7 @@ struct parser_exec_state { */ int saved_buf_addr_type; bool is_ctx_wa; + bool is_init_ctx; const struct cmd_info *info; @@ -709,6 +717,11 @@ static inline u32 cmd_val(struct parser_exec_state *s, int index) return *cmd_ptr(s, index); } +static inline bool is_init_ctx(struct parser_exec_state *s) +{ + return (s->buf_type == RING_BUFFER_CTX && s->is_init_ctx); +} + static void parser_exec_state_dump(struct parser_exec_state *s) { int cnt = 0; @@ -722,7 +735,8 @@ static void parser_exec_state_dump(struct parser_exec_state *s) gvt_dbg_cmd(" %s %s ip_gma(%08lx) ", s->buf_type == RING_BUFFER_INSTRUCTION ? - "RING_BUFFER" : "BATCH_BUFFER", + "RING_BUFFER" : ((s->buf_type == RING_BUFFER_CTX) ? + "CTX_BUFFER" : "BATCH_BUFFER"), s->buf_addr_type == GTT_BUFFER ? 
"GTT" : "PPGTT", s->ip_gma); @@ -757,7 +771,8 @@ static inline void update_ip_va(struct parser_exec_state *s) if (WARN_ON(s->ring_head == s->ring_tail)) return; - if (s->buf_type == RING_BUFFER_INSTRUCTION) { + if (s->buf_type == RING_BUFFER_INSTRUCTION || + s->buf_type == RING_BUFFER_CTX) { unsigned long ring_top = s->ring_start + s->ring_size; if (s->ring_head > s->ring_tail) { @@ -937,6 +952,11 @@ static int cmd_reg_handler(struct parser_exec_state *s, return -EFAULT; } + if (is_init_ctx(s)) { + intel_gvt_mmio_set_cmd_accessible(gvt, offset); + return 0; + } + if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) { gvt_vgpu_err("%s access to non-render register (%x)\n", cmd, offset); @@ -1216,6 +1236,8 @@ static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s) s->buf_type = BATCH_BUFFER_INSTRUCTION; ret = ip_gma_set(s, s->ret_ip_gma_bb); s->buf_addr_type = s->saved_buf_addr_type; + } else if (s->buf_type == RING_BUFFER_CTX) { + ret = ip_gma_set(s, s->ring_tail); } else { s->buf_type = RING_BUFFER_INSTRUCTION; s->buf_addr_type = GTT_BUFFER; @@ -2764,7 +2786,8 @@ static int command_scan(struct parser_exec_state *s, gma_bottom = rb_start + rb_len; while (s->ip_gma != gma_tail) { - if (s->buf_type == RING_BUFFER_INSTRUCTION) { + if (s->buf_type == RING_BUFFER_INSTRUCTION || + s->buf_type == RING_BUFFER_CTX) { if (!(s->ip_gma >= rb_start) || !(s->ip_gma < gma_bottom)) { gvt_vgpu_err("ip_gma %lx out of ring scope." @@ -3057,6 +3080,119 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) return 0; } +/* generate dummy contexts by sending empty requests to HW, and let + * the HW to fill Engine Contexts. This dummy contexts are used for + * initialization purpose (update reg whitelist), so referred to as + * init context here + */ +void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu) +{ + struct intel_gvt *gvt = vgpu->gvt; + struct drm_i915_private *dev_priv = gvt->gt->i915; + struct intel_engine_cs *engine; + enum intel_engine_id id; + const unsigned long start = LRC_STATE_PN * PAGE_SIZE; + struct i915_request *rq; + struct intel_vgpu_submission *s = &vgpu->submission; + struct i915_request *requests[I915_NUM_ENGINES] = {}; + bool is_ctx_pinned[I915_NUM_ENGINES] = {}; + int ret; + + if (gvt->is_reg_whitelist_updated) + return; + + for_each_engine(engine, &dev_priv->gt, id) { + ret = intel_context_pin(s->shadow[id]); + if (ret) { + gvt_vgpu_err("fail to pin shadow ctx\n"); + goto out; + } + is_ctx_pinned[id] = true; + + rq = i915_request_create(s->shadow[id]); + if (IS_ERR(rq)) { + gvt_vgpu_err("fail to alloc default request\n"); + ret = -EIO; + goto out; + } + requests[id] = i915_request_get(rq); + i915_request_add(rq); + } + + if (intel_gt_wait_for_idle(&dev_priv->gt, + I915_GEM_IDLE_TIMEOUT) == -ETIME) { + ret = -EIO; + goto out; + } + + /* scan init ctx to update cmd accessible list */ + for_each_engine(engine, &dev_priv->gt, id) { + int size = engine->context_size - PAGE_SIZE; + void *vaddr; + struct parser_exec_state s; + struct drm_i915_gem_object *obj; + struct i915_request *rq; + + rq = requests[id]; + GEM_BUG_ON(!i915_request_completed(rq)); + GEM_BUG_ON(!intel_context_is_pinned(rq->context)); + obj = rq->context->state->obj; + + if (!obj) { + ret = -EIO; + goto out; + } + + i915_gem_object_set_cache_coherency(obj, + I915_CACHE_LLC); + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + gvt_err("failed to pin init ctx obj, ring=%d, err=%lx\n", + id, PTR_ERR(vaddr)); + goto out; + } + + s.buf_type = RING_BUFFER_CTX; 
+ s.buf_addr_type = GTT_BUFFER; + s.vgpu = vgpu; + s.engine = engine; + s.ring_start = 0; + s.ring_size = size; + s.ring_head = 0; + s.ring_tail = size; + s.rb_va = vaddr + start; + s.workload = NULL; + s.is_ctx_wa = false; + s.is_init_ctx = true; + + /* skipping the first RING_CTX_SIZE(0x50) dwords */ + ret = ip_gma_set(&s, RING_CTX_SIZE); + if (ret) { + i915_gem_object_unpin_map(obj); + goto out; + } + + ret = command_scan(&s, 0, size, 0, size); + if (ret) + gvt_err("Scan init ctx error\n"); + + i915_gem_object_unpin_map(obj); + } + +out: + if (!ret) + gvt->is_reg_whitelist_updated = true; + + for (id = 0; id < I915_NUM_ENGINES ; id++) { + if (requests[id]) + i915_request_put(requests[id]); + + if (is_ctx_pinned[id]) + intel_context_unpin(s->shadow[id]); + } +} + static int init_cmd_table(struct intel_gvt *gvt) { unsigned int gen_type = intel_gvt_get_device_type(gvt); diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h index ab25d151932a..09ca2b8a63c8 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.h +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h @@ -50,4 +50,6 @@ int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload); int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); +void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu); + #endif diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 9831361f181e..3e268e25bc7d 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -327,6 +327,7 @@ struct intel_gvt { u32 *mocs_mmio_offset_list; u32 mocs_mmio_offset_list_cnt; } engine_mmio_list; + bool is_reg_whitelist_updated; struct dentry *debugfs_root; }; @@ -410,6 +411,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); #define vgpu_fence_base(vgpu) (vgpu->fence.base) #define vgpu_fence_sz(vgpu) (vgpu->fence.size) +/* ring context size i.e. the first 0x50 dwords*/ +#define RING_CTX_SIZE 320 + struct intel_vgpu_creation_params { __u64 handle; __u64 low_gm_sz; /* in MB */ diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index f6d7e33c7099..7fc4a3e9a560 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -499,9 +499,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, mutex_lock(&gvt->lock); vgpu = __intel_gvt_create_vgpu(gvt, &param); - if (!IS_ERR(vgpu)) + if (!IS_ERR(vgpu)) { /* calculate left instance change for types */ intel_gvt_update_vgpu_types(gvt); + intel_gvt_update_reg_whitelist(vgpu); + } mutex_unlock(&gvt->lock); return vgpu; From 3c4f2120d890b53855366c2e53ebe20806ed0cad Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Wed, 23 Dec 2020 11:45:00 +0800 Subject: [PATCH 02/11] drm/i915/gvt: scan VM ctx pages Logical Context is actually a big batch buffer consisting of multiple LRI commands + saved registers. It comprises Ring Context (the first 0x50 dwords) and Engine Context. The registers defined in Engine Context are command accessible, and safe to execute in VM Context. This patch 1. stops copying Ring Context and only copies Engine Context from VM Context, and 2. audits VM Engine Contexts to disallow undesired LRIs (i.e. LRIs that access registers outside of the Engine Context that hardware generates).
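For illustration only, a minimal self-contained sketch (not part of the diff below) of the context-image split this series relies on: the first 0x50 dwords (Ring Context) stay under host control, and only the Engine Context part is copied from, and audited in, the guest image. RING_CTX_SIZE mirrors the define added in patch 01; the 4 KiB page size stands in for I915_GTT_PAGE_SIZE.

#include <string.h>

#define RING_CTX_SIZE 320   /* 0x50 dwords * 4 bytes of Ring Context */
#define CTX_PAGE_SIZE 4096  /* stand-in for I915_GTT_PAGE_SIZE */

/* copy only the Engine Context registers, skipping the Ring Context */
void copy_engine_context(void *shadow_page, const void *guest_page)
{
	memcpy((char *)shadow_page + RING_CTX_SIZE,
	       (const char *)guest_page + RING_CTX_SIZE,
	       CTX_PAGE_SIZE - RING_CTX_SIZE);
}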
Cc: Kevin Tian Signed-off-by: Wang Zhi Signed-off-by: Yan Zhao Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201223034500.16983-1-yan.y.zhao@intel.com Reviewed-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 52 +++++++++++++++++++++++++++ drivers/gpu/drm/i915/gvt/cmd_parser.h | 2 ++ drivers/gpu/drm/i915/gvt/reg.h | 2 ++ drivers/gpu/drm/i915/gvt/scheduler.c | 22 +++++++++--- 4 files changed, 74 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 2e6d46246f76..a598329a3354 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -3193,6 +3193,58 @@ out: } } +int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload) +{ + struct intel_vgpu *vgpu = workload->vgpu; + unsigned long gma_head, gma_tail, gma_start, ctx_size; + struct parser_exec_state s; + int ring_id = workload->engine->id; + struct intel_context *ce = vgpu->submission.shadow[ring_id]; + int ret; + + GEM_BUG_ON(atomic_read(&ce->pin_count) < 0); + + ctx_size = workload->engine->context_size - PAGE_SIZE; + + /* Only ring contxt is loaded to HW for inhibit context, no need to + * scan engine context + */ + if (is_inhibit_context(ce)) + return 0; + + gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN*PAGE_SIZE; + gma_head = 0; + gma_tail = ctx_size; + + s.buf_type = RING_BUFFER_CTX; + s.buf_addr_type = GTT_BUFFER; + s.vgpu = workload->vgpu; + s.engine = workload->engine; + s.ring_start = gma_start; + s.ring_size = ctx_size; + s.ring_head = gma_start + gma_head; + s.ring_tail = gma_start + gma_tail; + s.rb_va = ce->lrc_reg_state; + s.workload = workload; + s.is_ctx_wa = false; + s.is_init_ctx = false; + + /* don't scan the first RING_CTX_SIZE(0x50) dwords, as it's ring + * context + */ + ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE); + if (ret) + goto out; + + ret = command_scan(&s, gma_head, gma_tail, + gma_start, ctx_size); +out: + if (ret) + gvt_vgpu_err("scan shadow ctx error\n"); + + return ret; +} + static int init_cmd_table(struct intel_gvt *gvt) { unsigned int gen_type = intel_gvt_get_device_type(gvt); diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h index 09ca2b8a63c8..d5e95b7026a1 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.h +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h @@ -52,4 +52,6 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu); +int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload); + #endif diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index b58860dee970..244cc7320b54 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -133,4 +133,6 @@ #define RING_GFX_MODE(base) _MMIO((base) + 0x29c) #define VF_GUARDBAND _MMIO(0x83a4) + +#define BCS_TILE_REGISTER_VAL_OFFSET (0x43*4) #endif diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 6af5c06caee0..43f31c2eab14 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -137,6 +137,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) int i; bool skip = false; int ring_id = workload->engine->id; + int ret; GEM_BUG_ON(!intel_context_is_pinned(ctx)); @@ -163,16 +164,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) COPY_REG(bb_per_ctx_ptr); 
COPY_REG(rcs_indirect_ctx); COPY_REG(rcs_indirect_ctx_offset); - } + } else if (workload->engine->id == BCS0) + intel_gvt_hypervisor_read_gpa(vgpu, + workload->ring_context_gpa + + BCS_TILE_REGISTER_VAL_OFFSET, + (void *)shadow_ring_context + + BCS_TILE_REGISTER_VAL_OFFSET, 4); #undef COPY_REG #undef COPY_REG_MASKED + /* don't copy Ring Context (the first 0x50 dwords), + * only copy the Engine Context part from guest + */ intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa + - sizeof(*shadow_ring_context), + RING_CTX_SIZE, (void *)shadow_ring_context + - sizeof(*shadow_ring_context), - I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); + RING_CTX_SIZE, + I915_GTT_PAGE_SIZE - RING_CTX_SIZE); sr_oa_regs(workload, (u32 *)shadow_ring_context, false); @@ -239,6 +248,11 @@ read: gpa_size = I915_GTT_PAGE_SIZE; dst = context_base + (i << I915_GTT_PAGE_SHIFT); } + ret = intel_gvt_scan_engine_context(workload); + if (ret) { + gvt_vgpu_err("invalid cmd found in guest context pages\n"); + return ret; + } s->last_ctx[ring_id].valid = true; return 0; } From f18d417a57438498e0de481d3a0bc900c2b0e057 Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Wed, 23 Dec 2020 11:45:08 +0800 Subject: [PATCH 03/11] drm/i915/gvt: filter cmds "srm" and "lrm" in cmd_handler do not allow "srm" and "lrm" except for GEN8_L3SQCREG4 and 0x21f0. Cc: Colin Xu Cc: Kevin Tian Signed-off-by: Yan Zhao Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201223034508.17031-1-yan.y.zhao@intel.com Reviewed-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index a598329a3354..7ee101644e75 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -982,6 +982,17 @@ static int cmd_reg_handler(struct parser_exec_state *s, patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); } + if (!strncmp(cmd, "srm", 3) || + !strncmp(cmd, "lrm", 3)) { + if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) && + offset != 0x21f0) { + gvt_vgpu_err("%s access to register (%x)\n", + cmd, offset); + return -EPERM; + } else + return 0; + } + if (is_cmd_update_pdps(offset, s) && cmd_pdp_mmio_update_handler(s, offset, index)) return -EINVAL; From 73a37a43d1b0ad4fb2154bc000d44c89e458619d Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Wed, 23 Dec 2020 11:45:17 +0800 Subject: [PATCH 04/11] drm/i915/gvt: filter cmds "lrr-src" and "lrr-dst" in cmd_handler do not allow "lrr-src" and "lrr-dst" Cc: Colin Xu Cc: Kevin Tian Signed-off-by: Yan Zhao Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201223034517.17080-1-yan.y.zhao@intel.com Reviewed-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 7ee101644e75..be0a7d4e8750 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -993,6 +993,12 @@ static int cmd_reg_handler(struct parser_exec_state *s, return 0; } + if (!strncmp(cmd, "lrr-src", 7) || + !strncmp(cmd, "lrr-dst", 7)) { + gvt_vgpu_err("not allowed cmd %s\n", cmd); + return -EPERM; + } + if (is_cmd_update_pdps(offset, s) && cmd_pdp_mmio_update_handler(s, offset, index)) return -EINVAL; From 252cec9fc95272fb0b00528a37be96d1f960c277 Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Wed, 23 Dec 2020 11:45:26 +0800 Subject: [PATCH 05/11] 
drm/i915/gvt: filter cmd "pipe-ctrl" in cmd_handler "pipe-ctrl" is still a TODO. allow it here. Cc: Colin Xu Cc: Kevin Tian Signed-off-by: Yan Zhao Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201223034526.17128-1-yan.y.zhao@intel.com Reviewed-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index be0a7d4e8750..83fa4f17c943 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -999,6 +999,11 @@ static int cmd_reg_handler(struct parser_exec_state *s, return -EPERM; } + if (!strncmp(cmd, "pipe_ctrl", 9)) { + /* TODO: add LRI POST logic here */ + return 0; + } + if (is_cmd_update_pdps(offset, s) && cmd_pdp_mmio_update_handler(s, offset, index)) return -EINVAL; From 70add39fad1cb86c9637fe59002a40668ac5628c Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Wed, 23 Dec 2020 11:45:36 +0800 Subject: [PATCH 06/11] drm/i915/gvt: export find_mmio_info export find_mmio_info to be accessible by other gvt components Cc: Kevin Tian Signed-off-by: Yan Zhao Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201223034536.17176-1-yan.y.zhao@intel.com Reviewed-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 6 +++--- drivers/gpu/drm/i915/gvt/mmio.h | 3 +++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 4ddc9c847470..7012e4cf7186 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -83,7 +83,7 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset, memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes); } -static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt, +struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, unsigned int offset) { struct intel_gvt_mmio_info *e; @@ -118,7 +118,7 @@ static int new_mmio_info(struct intel_gvt *gvt, return -ENOMEM; info->offset = i; - p = find_mmio_info(gvt, info->offset); + p = intel_gvt_find_mmio_info(gvt, info->offset); if (p) { WARN(1, "dup mmio definition offset %x\n", info->offset); @@ -3625,7 +3625,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, /* * Normal tracked MMIOs. */ - mmio_info = find_mmio_info(gvt, offset); + mmio_info = intel_gvt_find_mmio_info(gvt, offset); if (!mmio_info) { gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes); goto default_rw; diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index cc4812648bf4..0851b2dc732c 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -80,6 +80,9 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, int (*handler)(struct intel_gvt *gvt, u32 offset, void *data), void *data); +struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, + unsigned int offset); + int intel_vgpu_init_mmio(struct intel_vgpu *vgpu); void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr); void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu); From 1a8811930c967e0fdfba17988b5b10e80c6ab9cb Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Wed, 23 Dec 2020 11:45:45 +0800 Subject: [PATCH 07/11] drm/i915/gvt: make width of mmio_attribute bigger 8 bits are all used up. extend it to 16 bits to hold more flags. 
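As a self-contained illustration (not kernel code) of why the attribute storage must grow: a flag at bit 8, such as the F_CMD_WRITE_PATCH added later in this series, is silently truncated in a u8 but preserved in a u16.

#include <assert.h>
#include <stdint.h>

#define F_SR_IN_CTX       (1 << 7) /* last flag that still fits in 8 bits */
#define F_CMD_WRITE_PATCH (1 << 8) /* new flag introduced later in this series */

int main(void)
{
	uint8_t attr8 = F_CMD_WRITE_PATCH;   /* bit 8 is lost: value becomes 0 */
	uint16_t attr16 = F_CMD_WRITE_PATCH; /* bit 8 is kept */

	assert(!(attr8 & F_CMD_WRITE_PATCH));
	assert(attr16 & F_CMD_WRITE_PATCH);
	return 0;
}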
Suggested-by: Zhenyu Wang Signed-off-by: Yan Zhao Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201223034545.17224-1-yan.y.zhao@intel.com Reviewed-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 2 +- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 3e268e25bc7d..f82ad5a93720 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -244,7 +244,7 @@ struct gvt_mmio_block { #define INTEL_GVT_MMIO_HASH_BITS 11 struct intel_gvt_mmio { - u8 *mmio_attribute; + u16 *mmio_attribute; /* Register contains RO bits */ #define F_RO (1 << 0) /* Register contains graphics address */ diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 7012e4cf7186..bc3d28fba41a 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -96,7 +96,7 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, } static int new_mmio_info(struct intel_gvt *gvt, - u32 offset, u8 flags, u32 size, + u32 offset, u16 flags, u32 size, u32 addr_mask, u32 ro_mask, u32 device, gvt_mmio_func read, gvt_mmio_func write) { From bed42f13cac5e0dc1645e1bfb620d3dec758fe07 Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Wed, 23 Dec 2020 11:45:53 +0800 Subject: [PATCH 08/11] drm/i915/gvt: introduce a new flag F_CMD_WRITE_PATCH F_CMD_WRITE_PATCH means command write to this register needs to be patched Cc: Kevin Tian Signed-off-by: Yan Zhao Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201223034553.17272-1-yan.y.zhao@intel.com Reviewed-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index f82ad5a93720..eb5d8dcd258d 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -261,6 +261,8 @@ struct intel_gvt_mmio { * logical context image */ #define F_SR_IN_CTX (1 << 7) +/* Value of command write of this reg needs to be patched */ +#define F_CMD_WRITE_PATCH (1 << 8) struct gvt_mmio_block *mmio_block; unsigned int num_mmio_block; @@ -685,6 +687,35 @@ static inline void intel_gvt_mmio_set_sr_in_ctx( } void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); +/** + * intel_gvt_mmio_set_cmd_write_patch - + * mark an MMIO if its cmd write needs to be + * patched + * @gvt: a GVT device + * @offset: register offset + * + */ +static inline void intel_gvt_mmio_set_cmd_write_patch( + struct intel_gvt *gvt, unsigned int offset) +{ + gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH; +} + +/** + * intel_gvt_mmio_is_cmd_write_patch - check if an mmio's cmd access needs to + * be patched + * @gvt: a GVT device + * @offset: register offset + * + * Returns: + * True if GPU commmand write to an MMIO should be patched + */ +static inline bool intel_gvt_mmio_is_cmd_write_patch( + struct intel_gvt *gvt, unsigned int offset) +{ + return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH; +} + void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_init(struct intel_gvt *gvt); void intel_gvt_debugfs_clean(struct intel_gvt *gvt); From 885e1938452fc7fc37a3051d76e1ddb7ead099fa Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Wed, 23 Dec 2020 11:46:03 +0800 Subject: [PATCH 09/11] drm/i915/gvt: statically set F_CMD_WRITE_PATCH flag statically set 
F_CMD_WRITE_PATCH flag for RING MODE registers and force_nonpriv registers Cc: Kevin Tian Signed-off-by: Yan Zhao Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201223034603.17320-1-yan.y.zhao@intel.com Reviewed-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index bc3d28fba41a..920ff2c99d0e 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1965,7 +1965,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) /* RING MODE */ #define RING_REG(base) _MMIO((base) + 0x29c) - MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, + MMIO_RING_DFH(RING_REG, D_ALL, + F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL, ring_mode_mmio_write); #undef RING_REG @@ -2885,8 +2886,8 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_D(_MMIO(0xb110), D_BDW); - MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, - NULL, force_nonpriv_write); + MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0, + D_BDW_PLUS, NULL, force_nonpriv_write); MMIO_D(_MMIO(0x44484), D_BDW_PLUS); MMIO_D(_MMIO(0x4448c), D_BDW_PLUS); From b7ccb10ed80c6e152cf2a4eb16f548715cc1869a Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Wed, 23 Dec 2020 11:46:23 +0800 Subject: [PATCH 10/11] drm/i915/gvt: update F_CMD_WRITE_PATCH flag when parsing init ctx If a vreg has a write handler, set its F_CMD_WRITE_PATCH flag when parsing the init ctx. Cc: Kevin Tian Signed-off-by: Yan Zhao Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201223034623.17368-1-yan.y.zhao@intel.com Reviewed-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 83fa4f17c943..97f1c3e270e6 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -953,7 +953,12 @@ static int cmd_reg_handler(struct parser_exec_state *s, } if (is_init_ctx(s)) { + struct intel_gvt_mmio_info *mmio_info; + intel_gvt_mmio_set_cmd_accessible(gvt, offset); + mmio_info = intel_gvt_find_mmio_info(gvt, offset); + if (mmio_info && mmio_info->write) + intel_gvt_mmio_set_cmd_write_patch(gvt, offset); return 0; } From 02dd2b12a685944c4d52c569d05f636372a7b6c7 Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Wed, 23 Dec 2020 11:46:32 +0800 Subject: [PATCH 11/11] drm/i915/gvt: unify lri cmd handler and mmio handlers For common contexts and commands in a workload, call the mmio handlers for value checking and command patching on registers that have the command-write-patch flag (F_CMD_WRITE_PATCH) set. After calling the mmio handlers, restore the old vreg values so that vregs stay unchanged after command scanning. Note: for regs in the categories below, a. in ctx and save-restore list, when the ctx is an inhibit ctx b. in mocs range do not restore the old vreg values after cmd scanning, but keep updating vregs as was implemented before this patch.
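For illustration only, a minimal userspace model (not the kernel change in the diff below) of the scan-patch-restore flow described above: the LRI value is pushed through the register's normal write handler, the command is patched with whatever value the handler produced, and the vreg is then restored so scanning leaves no lasting side effect.

#include <stdint.h>
#include <stdio.h>

static uint32_t vreg;                    /* stand-in for vgpu_vreg(vgpu, offset) */

static void write_handler(uint32_t val)  /* stand-in for mmio_info->write() */
{
	vreg = val & ~0x1u;              /* e.g. the handler masks off a RO bit */
}

static uint32_t scan_lri(uint32_t cmdval)
{
	uint32_t old = vreg;

	write_handler(cmdval);           /* value checking / fixup */
	cmdval = vreg;                   /* patched value written back to the command */
	vreg = old;                      /* keep vreg unchanged after scanning */
	return cmdval;
}

int main(void)
{
	printf("patched LRI value: 0x%x\n", scan_lri(0xdeadbeefu));
	return 0;
}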
Cc: Kevin Tian Signed-off-by: Yan Zhao Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201223034632.17416-1-yan.y.zhao@intel.com Reviewed-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 130 +++++++++++--------------- 1 file changed, 55 insertions(+), 75 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 97f1c3e270e6..7fb91de06557 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -836,68 +836,12 @@ static inline int cmd_length(struct parser_exec_state *s) *addr = val; \ } while (0) -static bool is_shadowed_mmio(unsigned int offset) -{ - bool ret = false; - - if ((offset == 0x2168) || /*BB current head register UDW */ - (offset == 0x2140) || /*BB current header register */ - (offset == 0x211c) || /*second BB header register UDW */ - (offset == 0x2114)) { /*second BB header register UDW */ - ret = true; - } - return ret; -} - -static inline bool is_force_nonpriv_mmio(unsigned int offset) -{ - return (offset >= 0x24d0 && offset < 0x2500); -} - -static int force_nonpriv_reg_handler(struct parser_exec_state *s, - unsigned int offset, unsigned int index, char *cmd) -{ - struct intel_gvt *gvt = s->vgpu->gvt; - unsigned int data; - u32 ring_base; - u32 nopid; - - if (!strcmp(cmd, "lri")) - data = cmd_val(s, index + 1); - else { - gvt_err("Unexpected forcenonpriv 0x%x write from cmd %s\n", - offset, cmd); - return -EINVAL; - } - - ring_base = s->engine->mmio_base; - nopid = i915_mmio_reg_offset(RING_NOPID(ring_base)); - - if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) && - data != nopid) { - gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n", - offset, data); - patch_value(s, cmd_ptr(s, index), nopid); - return 0; - } - return 0; -} - static inline bool is_mocs_mmio(unsigned int offset) { return ((offset >= 0xc800) && (offset <= 0xcff8)) || ((offset >= 0xb020) && (offset <= 0xb0a0)); } -static int mocs_cmd_reg_handler(struct parser_exec_state *s, - unsigned int offset, unsigned int index) -{ - if (!is_mocs_mmio(offset)) - return -EINVAL; - vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1); - return 0; -} - static int is_cmd_update_pdps(unsigned int offset, struct parser_exec_state *s) { @@ -945,6 +889,7 @@ static int cmd_reg_handler(struct parser_exec_state *s, struct intel_vgpu *vgpu = s->vgpu; struct intel_gvt *gvt = vgpu->gvt; u32 ctx_sr_ctl; + u32 *vreg, vreg_old; if (offset + 4 > gvt->device_info.mmio_size) { gvt_vgpu_err("%s access to (%x) outside of MMIO range\n", @@ -968,25 +913,6 @@ static int cmd_reg_handler(struct parser_exec_state *s, return -EBADRQC; } - if (is_shadowed_mmio(offset)) { - gvt_vgpu_err("found access of shadowed MMIO %x\n", offset); - return 0; - } - - if (is_mocs_mmio(offset) && - mocs_cmd_reg_handler(s, offset, index)) - return -EINVAL; - - if (is_force_nonpriv_mmio(offset) && - force_nonpriv_reg_handler(s, offset, index, cmd)) - return -EPERM; - - if (offset == i915_mmio_reg_offset(DERRMR) || - offset == i915_mmio_reg_offset(FORCEWAKE_MT)) { - /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */ - patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); - } - if (!strncmp(cmd, "srm", 3) || !strncmp(cmd, "lrm", 3)) { if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) && @@ -1009,10 +935,64 @@ static int cmd_reg_handler(struct parser_exec_state *s, return 0; } + if (strncmp(cmd, "lri", 3)) + return -EPERM; + + /* below are all lri handlers */ + vreg = &vgpu_vreg(s->vgpu, offset); + if 
(!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) { + gvt_vgpu_err("%s access to non-render register (%x)\n", + cmd, offset); + return -EBADRQC; + } + if (is_cmd_update_pdps(offset, s) && cmd_pdp_mmio_update_handler(s, offset, index)) return -EINVAL; + if (offset == i915_mmio_reg_offset(DERRMR) || + offset == i915_mmio_reg_offset(FORCEWAKE_MT)) { + /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */ + patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); + } + + if (is_mocs_mmio(offset)) + *vreg = cmd_val(s, index + 1); + + vreg_old = *vreg; + + if (intel_gvt_mmio_is_cmd_write_patch(gvt, offset)) { + u32 cmdval_new, cmdval; + struct intel_gvt_mmio_info *mmio_info; + + cmdval = cmd_val(s, index + 1); + + mmio_info = intel_gvt_find_mmio_info(gvt, offset); + if (!mmio_info) { + cmdval_new = cmdval; + } else { + u64 ro_mask = mmio_info->ro_mask; + int ret; + + if (likely(!ro_mask)) + ret = mmio_info->write(s->vgpu, offset, + &cmdval, 4); + else { + gvt_vgpu_err("try to write RO reg %x\n", + offset); + ret = -EBADRQC; + } + if (ret) + return ret; + cmdval_new = *vreg; + } + if (cmdval_new != cmdval) + patch_value(s, cmd_ptr(s, index+1), cmdval_new); + } + + /* only patch cmd. restore vreg value if changed in mmio write handler*/ + *vreg = vreg_old; + /* TODO * In order to let workload with inhibit context to generate * correct image data into memory, vregs values will be loaded to