drm/i915/gvt: properly access enabled intel_engine_cs
Switch to the new for_each_engine() helper to properly access enabled
intel_engine_cs, as the i915 core now manages the engine list dynamically.
GVT-g init still depends on the ring mask to determine the engine list,
since it runs earlier.

Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
commit 0fac21e7e9 (parent 3eec872207)
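The core of the change: fixed-index loops over I915_NUM_ENGINES assumed every
engine slot is populated, while the i915 core now allocates intel_engine_cs
dynamically, leaving absent engines empty. for_each_engine() visits only the
engines that exist. Below is a minimal userspace sketch of that iteration
pattern; all names, types, and the NULL-slot convention are assumptions for
illustration, not the kernel's actual definitions.

#include <stdio.h>

/* Assumed stand-ins for the kernel's types; illustration only. */
enum engine_id { RCS, BCS, VCS, NUM_ENGINES };

struct engine { const char *name; };

/* A NULL slot models an engine this device does not have: exactly
 * what made "for (i = 0; i < I915_NUM_ENGINES; i++)" unsafe once the
 * core started managing intel_engine_cs dynamically. */
static struct engine rcs = { "rcs" }, vcs = { "vcs" };
static struct engine *engines[NUM_ENGINES] = { &rcs, NULL, &vcs };

/* Sketch of a for_each_engine()-style iterator: skip empty slots. */
#define for_each_engine(e, id) \
	for ((id) = 0; (id) < NUM_ENGINES; (id)++) \
		if (!((e) = engines[(id)])) {} else

int main(void)
{
	struct engine *e;
	int id;

	for_each_engine(e, id)
		printf("engine %d: %s\n", id, e->name);
	return 0;
}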
@@ -817,10 +817,11 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 
 int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 {
-	int i;
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
 
 	/* each ring has a virtual execlist engine */
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
+	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
 		init_vgpu_execlist(vgpu, i);
 		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
 	}
@@ -132,12 +132,13 @@ static int new_mmio_info(struct intel_gvt *gvt,
 
 static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
 {
-	int i;
+	enum intel_engine_id id;
+	struct intel_engine_cs *engine;
 
 	reg &= ~GENMASK(11, 0);
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (gvt->dev_priv->engine[i]->mmio_base == reg)
-			return i;
+	for_each_engine(engine, gvt->dev_priv, id) {
+		if (engine->mmio_base == reg)
+			return id;
 	}
 	return -1;
 }
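For context on the lookup above: the render registers handled here sit within
the 4KiB MMIO page starting at each engine's mmio_base, so clearing the low 12
bits of a register offset recovers the page base that the loop can match
against engine->mmio_base. A small standalone sketch; the example register
offset is an assumption for illustration.

#include <stdio.h>
#include <stdint.h>

/* GENMASK(11, 0) expands to 0xfff; clearing those bits rounds a
 * register offset down to its 4KiB page base. */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	uint32_t reg = 0x2230;                 /* hypothetical render-ring register */
	uint32_t base = reg & ~GENMASK(11, 0); /* -> 0x2000 */

	printf("reg 0x%x lives in MMIO page 0x%x\n", reg, base);
	return 0;
}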
@@ -1306,7 +1307,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	u32 data = *(u32 *)p_data;
 	int ret;
 
-	if (WARN_ON(ring_id < 0))
+	if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
 		return -EINVAL;
 
 	execlist = &vgpu->execlist[ring_id];
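The widened WARN_ON matters because ring_id indexes the fixed-size
vgpu->execlist[] array, so both ends of the range must be guarded before the
array access. A trivial sketch of the check, with hypothetical names and an
assumed slot count:

#include <errno.h>
#include <stdio.h>

#define NUM_SLOTS 5 /* stand-in for I915_NUM_ENGINES; value assumed */

/* Hypothetical helper: reject any index outside [0, NUM_SLOTS - 1]
 * before it is used to index a fixed-size per-engine array. */
static int validate_ring_id(int id)
{
	if (id < 0 || id > NUM_SLOTS - 1)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", validate_ring_id(-1), validate_ring_id(2),
	       validate_ring_id(NUM_SLOTS));
	return 0;
}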
@@ -37,9 +37,10 @@
 static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_execlist *execlist;
-	int i;
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
+	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
 		execlist = &vgpu->execlist[i];
 		if (!list_empty(workload_q_head(vgpu, i)))
 			return true;
@@ -51,7 +52,8 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	int i;
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
 
 	/* no target to schedule */
 	if (!scheduler->next_vgpu)
@@ -67,7 +69,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	scheduler->need_reschedule = true;
 
 	/* still have uncompleted workload? */
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
+	for_each_engine(engine, gvt->dev_priv, i) {
 		if (scheduler->current_workload[i]) {
 			gvt_dbg_sched("still have running workload\n");
 			return;
@@ -84,7 +86,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	scheduler->need_reschedule = false;
 
 	/* wake up workload dispatch thread */
-	for (i = 0; i < I915_NUM_ENGINES; i++)
+	for_each_engine(engine, gvt->dev_priv, i)
 		wake_up(&scheduler->waitq[i]);
 }
 
@@ -510,6 +510,10 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 	init_waitqueue_head(&scheduler->workload_complete_wq);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		/* check ring mask at init time */
+		if (!HAS_ENGINE(gvt->dev_priv, i))
+			continue;
+
 		init_waitqueue_head(&scheduler->waitq[i]);
 
 		param = kzalloc(sizeof(*param), GFP_KERNEL);
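Note that this last hunk keeps the index-based loop on purpose: at init time
the engine list is not yet usable, so the code filters slots with the ring
mask instead, as the commit message explains. A minimal userspace sketch of
that filtering idea; the mask layout, engine names, and slot count are
assumptions, not the kernel's definitions.

#include <stdio.h>

/* Assumed engine slots, mirroring the idea of I915_NUM_ENGINES. */
enum engine_id { RCS, BCS, VCS, VCS2, VECS, NUM_ENGINES };

/* Sketch of a HAS_ENGINE-style test: bit i of the ring mask set
 * means engine slot i exists on this device. */
#define HAS_ENGINE(mask, i) (!!((mask) & (1u << (i))))

int main(void)
{
	unsigned int ring_mask = (1u << RCS) | (1u << BCS) | (1u << VCS);

	for (int i = 0; i < NUM_ENGINES; i++) {
		if (!HAS_ENGINE(ring_mask, i))
			continue; /* skip absent engines, as the patch does */
		printf("init per-engine state for slot %d\n", i);
	}
	return 0;
}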