drm/i915/selftests: Drop vestigal struct_mutex guards
We no longer need struct_mutex to serialise request emission, so remove it
from the gt selftests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-20-chris@chris-wilson.co.uk
parent 3d88f76dec
commit 2af402982a
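The recurring change in the hunks below is mechanical: the struct_mutex lock/unlock pair that used to bracket request emission is deleted, the matching err_unlock/out_unlock exit labels are renamed or removed, and early setup failures simply return an error code. As a rough, hypothetical sketch of that transformation (the selftest name and body below are invented for illustration; only the helpers it calls, such as mutex_lock(&i915->drm.struct_mutex), intel_runtime_pm_get/put() and igt_spinner_init/fini(), are the real ones appearing in the hunks):

/*
 * Hypothetical "before" selftest: request emission bracketed by
 * struct_mutex, with a dedicated unwind label for the unlock.
 */
static int live_example_before(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct igt_spinner spin;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        if (igt_spinner_init(&spin, &i915->gt))
                goto err_unlock;

        /* ... emit requests, wait on the spinner, check the results ... */
        err = 0;

        igt_spinner_fini(&spin);
err_unlock:
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

/*
 * Hypothetical "after" selftest: request emission is serialised by the
 * request/timeline locks themselves, so the test neither takes
 * struct_mutex nor needs an unlock-on-error label.
 */
static int live_example_after(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct igt_spinner spin;
        int err = 0;

        if (igt_spinner_init(&spin, &i915->gt))
                return -ENOMEM;

        /* ... emit requests, wait on the spinner, check the results ... */

        igt_spinner_fini(&spin);
        return err;
}

In most hunks the explicit runtime-PM wakeref disappears along with the mutex; in the GuC and workarounds selftests the wakeref is kept and only the struct_mutex calls go away.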
@@ -1639,7 +1639,6 @@ int i915_gem_huge_page_mock_selftests(void)
         mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
         mkwrite_device_info(dev_priv)->ppgtt_size = 48;
 
-        mutex_lock(&dev_priv->drm.struct_mutex);
         ppgtt = i915_ppgtt_create(dev_priv);
         if (IS_ERR(ppgtt)) {
                 err = PTR_ERR(ppgtt);
@@ -1665,9 +1664,7 @@ out_close:
         i915_vm_put(&ppgtt->vm);
 
 out_unlock:
-        mutex_unlock(&dev_priv->drm.struct_mutex);
         drm_dev_put(&dev_priv->drm);
-
         return err;
 }
 
@@ -1684,7 +1681,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
         struct drm_file *file;
         struct i915_gem_context *ctx;
         struct i915_address_space *vm;
-        intel_wakeref_t wakeref;
         int err;
 
         if (!HAS_PPGTT(i915)) {
@@ -1699,13 +1695,10 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
         if (IS_ERR(file))
                 return PTR_ERR(file);
 
-        mutex_lock(&i915->drm.struct_mutex);
-        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
         ctx = live_context(i915, file);
         if (IS_ERR(ctx)) {
                 err = PTR_ERR(ctx);
-                goto out_unlock;
+                goto out_file;
         }
 
         mutex_lock(&ctx->mutex);
@@ -1716,11 +1709,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 
         err = i915_subtests(tests, ctx);
 
-out_unlock:
-        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-        mutex_unlock(&i915->drm.struct_mutex);
-
+out_file:
         mock_file_free(i915, file);
-
         return err;
 }
@@ -307,9 +307,7 @@ static int live_parallel_switch(void *arg)
                 struct igt_live_test t;
                 int n;
 
-                mutex_lock(&i915->drm.struct_mutex);
                 err = igt_live_test_begin(&t, i915, __func__, "");
-                mutex_unlock(&i915->drm.struct_mutex);
                 if (err)
                         break;
 
@@ -341,10 +339,8 @@ static int live_parallel_switch(void *arg)
                         data[n].tsk = NULL;
                 }
 
-                mutex_lock(&i915->drm.struct_mutex);
                 if (igt_live_test_end(&t))
                         err = -EIO;
-                mutex_unlock(&i915->drm.struct_mutex);
         }
 
 out:
@@ -669,9 +669,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
                         goto out;
                 }
 
-                mutex_lock(&i915->drm.struct_mutex);
                 err = make_obj_busy(obj);
-                mutex_unlock(&i915->drm.struct_mutex);
                 if (err) {
                         pr_err("[loop %d] Failed to busy the object\n", loop);
                         goto err_obj;
@@ -25,9 +25,7 @@ static int mock_phys_object(void *arg)
                 goto out;
         }
 
-        mutex_lock(&i915->drm.struct_mutex);
         err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
-        mutex_unlock(&i915->drm.struct_mutex);
         if (err) {
                 pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
                 goto out_obj;
@@ -26,17 +26,13 @@ static int live_sanitycheck(void *arg)
         struct i915_gem_context *ctx;
         struct intel_context *ce;
         struct igt_spinner spin;
-        intel_wakeref_t wakeref;
         int err = -ENOMEM;
 
         if (!HAS_LOGICAL_RING_CONTEXTS(i915))
                 return 0;
 
-        mutex_lock(&i915->drm.struct_mutex);
-        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
         if (igt_spinner_init(&spin, &i915->gt))
-                goto err_unlock;
+                return -ENOMEM;
 
         ctx = kernel_context(i915);
         if (!ctx)
@@ -73,9 +69,6 @@ err_ctx:
         kernel_context_close(ctx);
 err_spin:
         igt_spinner_fini(&spin);
-err_unlock:
-        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-        mutex_unlock(&i915->drm.struct_mutex);
         return err;
 }
 
@@ -402,7 +395,6 @@ static int live_timeslice_preempt(void *arg)
 {
         struct drm_i915_private *i915 = arg;
         struct drm_i915_gem_object *obj;
-        intel_wakeref_t wakeref;
         struct i915_vma *vma;
         void *vaddr;
         int err = 0;
@@ -417,14 +409,9 @@ static int live_timeslice_preempt(void *arg)
          * ready task.
          */
 
-        mutex_lock(&i915->drm.struct_mutex);
-        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
-        if (IS_ERR(obj)) {
-                err = PTR_ERR(obj);
-                goto err_unlock;
-        }
+        if (IS_ERR(obj))
+                return PTR_ERR(obj);
 
         vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
         if (IS_ERR(vma)) {
@@ -469,10 +456,6 @@ err_map:
         i915_gem_object_unpin_map(obj);
 err_obj:
         i915_gem_object_put(obj);
-err_unlock:
-        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-        mutex_unlock(&i915->drm.struct_mutex);
-
         return err;
 }
 
@@ -484,7 +467,6 @@ static int live_busywait_preempt(void *arg)
         struct drm_i915_gem_object *obj;
         struct i915_vma *vma;
         enum intel_engine_id id;
-        intel_wakeref_t wakeref;
         int err = -ENOMEM;
         u32 *map;
 
@@ -493,12 +475,9 @@ static int live_busywait_preempt(void *arg)
          * preempt the busywaits used to synchronise between rings.
          */
 
-        mutex_lock(&i915->drm.struct_mutex);
-        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
         ctx_hi = kernel_context(i915);
         if (!ctx_hi)
-                goto err_unlock;
+                return -ENOMEM;
         ctx_hi->sched.priority =
                 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
 
@@ -652,9 +631,6 @@ err_ctx_lo:
         kernel_context_close(ctx_lo);
 err_ctx_hi:
         kernel_context_close(ctx_hi);
-err_unlock:
-        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-        mutex_unlock(&i915->drm.struct_mutex);
         return err;
 }
 
@@ -683,7 +659,6 @@ static int live_preempt(void *arg)
         struct igt_spinner spin_hi, spin_lo;
         struct intel_engine_cs *engine;
         enum intel_engine_id id;
-        intel_wakeref_t wakeref;
         int err = -ENOMEM;
 
         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
@@ -692,11 +667,8 @@ static int live_preempt(void *arg)
         if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
                 pr_err("Logical preemption supported, but not exposed\n");
 
-        mutex_lock(&i915->drm.struct_mutex);
-        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
         if (igt_spinner_init(&spin_hi, &i915->gt))
-                goto err_unlock;
+                return -ENOMEM;
 
         if (igt_spinner_init(&spin_lo, &i915->gt))
                 goto err_spin_hi;
@@ -776,9 +748,6 @@ err_spin_lo:
         igt_spinner_fini(&spin_lo);
 err_spin_hi:
         igt_spinner_fini(&spin_hi);
-err_unlock:
-        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-        mutex_unlock(&i915->drm.struct_mutex);
         return err;
 }
 
@@ -790,17 +759,13 @@ static int live_late_preempt(void *arg)
         struct intel_engine_cs *engine;
         struct i915_sched_attr attr = {};
         enum intel_engine_id id;
-        intel_wakeref_t wakeref;
         int err = -ENOMEM;
 
         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                 return 0;
 
-        mutex_lock(&i915->drm.struct_mutex);
-        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
         if (igt_spinner_init(&spin_hi, &i915->gt))
-                goto err_unlock;
+                return -ENOMEM;
 
         if (igt_spinner_init(&spin_lo, &i915->gt))
                 goto err_spin_hi;
@@ -882,9 +847,6 @@ err_spin_lo:
         igt_spinner_fini(&spin_lo);
 err_spin_hi:
         igt_spinner_fini(&spin_hi);
-err_unlock:
-        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-        mutex_unlock(&i915->drm.struct_mutex);
         return err;
 
 err_wedged:
@@ -929,7 +891,6 @@ static int live_nopreempt(void *arg)
         struct intel_engine_cs *engine;
         struct preempt_client a, b;
         enum intel_engine_id id;
-        intel_wakeref_t wakeref;
         int err = -ENOMEM;
 
         /*
@@ -940,11 +901,8 @@ static int live_nopreempt(void *arg)
         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                 return 0;
 
-        mutex_lock(&i915->drm.struct_mutex);
-        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
         if (preempt_client_init(i915, &a))
-                goto err_unlock;
+                return -ENOMEM;
         if (preempt_client_init(i915, &b))
                 goto err_client_a;
         b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
@@ -1018,9 +976,6 @@ err_client_b:
         preempt_client_fini(&b);
 err_client_a:
         preempt_client_fini(&a);
-err_unlock:
-        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-        mutex_unlock(&i915->drm.struct_mutex);
         return err;
 
 err_wedged:
@@ -1040,7 +995,6 @@ static int live_suppress_self_preempt(void *arg)
         };
         struct preempt_client a, b;
         enum intel_engine_id id;
-        intel_wakeref_t wakeref;
         int err = -ENOMEM;
 
         /*
@@ -1059,11 +1013,8 @@ static int live_suppress_self_preempt(void *arg)
         if (intel_vgpu_active(i915))
                 return 0; /* GVT forces single port & request submission */
 
-        mutex_lock(&i915->drm.struct_mutex);
-        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
         if (preempt_client_init(i915, &a))
-                goto err_unlock;
+                return -ENOMEM;
         if (preempt_client_init(i915, &b))
                 goto err_client_a;
 
@@ -1144,9 +1095,6 @@ err_client_b:
         preempt_client_fini(&b);
 err_client_a:
         preempt_client_fini(&a);
-err_unlock:
-        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-        mutex_unlock(&i915->drm.struct_mutex);
         return err;
 
 err_wedged:
@@ -1216,7 +1164,6 @@ static int live_suppress_wait_preempt(void *arg)
         struct preempt_client client[4];
         struct intel_engine_cs *engine;
         enum intel_engine_id id;
-        intel_wakeref_t wakeref;
         int err = -ENOMEM;
         int i;
 
@@ -1229,11 +1176,8 @@ static int live_suppress_wait_preempt(void *arg)
         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                 return 0;
 
-        mutex_lock(&i915->drm.struct_mutex);
-        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
         if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
-                goto err_unlock;
+                return -ENOMEM;
         if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
                 goto err_client_0;
         if (preempt_client_init(i915, &client[2])) /* head of queue */
@@ -1319,9 +1263,6 @@ err_client_1:
         preempt_client_fini(&client[1]);
 err_client_0:
         preempt_client_fini(&client[0]);
-err_unlock:
-        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-        mutex_unlock(&i915->drm.struct_mutex);
         return err;
 
 err_wedged:
@@ -1338,7 +1279,6 @@ static int live_chain_preempt(void *arg)
         struct intel_engine_cs *engine;
         struct preempt_client hi, lo;
         enum intel_engine_id id;
-        intel_wakeref_t wakeref;
         int err = -ENOMEM;
 
         /*
@@ -1350,11 +1290,8 @@ static int live_chain_preempt(void *arg)
         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                 return 0;
 
-        mutex_lock(&i915->drm.struct_mutex);
-        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
         if (preempt_client_init(i915, &hi))
-                goto err_unlock;
+                return -ENOMEM;
 
         if (preempt_client_init(i915, &lo))
                 goto err_client_hi;
@@ -1465,9 +1402,6 @@ err_client_lo:
         preempt_client_fini(&lo);
 err_client_hi:
         preempt_client_fini(&hi);
-err_unlock:
-        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-        mutex_unlock(&i915->drm.struct_mutex);
         return err;
 
 err_wedged:
@@ -1485,7 +1419,6 @@ static int live_preempt_hang(void *arg)
         struct igt_spinner spin_hi, spin_lo;
         struct intel_engine_cs *engine;
         enum intel_engine_id id;
-        intel_wakeref_t wakeref;
         int err = -ENOMEM;
 
         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
@@ -1494,11 +1427,8 @@ static int live_preempt_hang(void *arg)
         if (!intel_has_reset_engine(&i915->gt))
                 return 0;
 
-        mutex_lock(&i915->drm.struct_mutex);
-        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
         if (igt_spinner_init(&spin_hi, &i915->gt))
-                goto err_unlock;
+                return -ENOMEM;
 
         if (igt_spinner_init(&spin_lo, &i915->gt))
                 goto err_spin_hi;
@@ -1590,9 +1520,6 @@ err_spin_lo:
         igt_spinner_fini(&spin_lo);
 err_spin_hi:
         igt_spinner_fini(&spin_hi);
-err_unlock:
-        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-        mutex_unlock(&i915->drm.struct_mutex);
         return err;
 }
 
@@ -1684,11 +1611,9 @@ static int smoke_crescendo_thread(void *arg)
                 struct i915_gem_context *ctx = smoke_context(smoke);
                 int err;
 
-                mutex_lock(&smoke->i915->drm.struct_mutex);
                 err = smoke_submit(smoke,
                                    ctx, count % I915_PRIORITY_MAX,
                                    smoke->batch);
-                mutex_unlock(&smoke->i915->drm.struct_mutex);
                 if (err)
                         return err;
 
@@ -1709,8 +1634,6 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
         unsigned long count;
         int err = 0;
 
-        mutex_unlock(&smoke->i915->drm.struct_mutex);
-
         for_each_engine(engine, smoke->i915, id) {
                 arg[id] = *smoke;
                 arg[id].engine = engine;
@@ -1743,8 +1666,6 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
                 put_task_struct(tsk[id]);
         }
 
-        mutex_lock(&smoke->i915->drm.struct_mutex);
-
         pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
                 count, flags,
                 RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
@@ -1787,7 +1708,6 @@ static int live_preempt_smoke(void *arg)
                 .ncontext = 1024,
         };
         const unsigned int phase[] = { 0, BATCH };
-        intel_wakeref_t wakeref;
         struct igt_live_test t;
         int err = -ENOMEM;
         u32 *cs;
@@ -1802,13 +1722,10 @@ static int live_preempt_smoke(void *arg)
         if (!smoke.contexts)
                 return -ENOMEM;
 
-        mutex_lock(&smoke.i915->drm.struct_mutex);
-        wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);
-
         smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
         if (IS_ERR(smoke.batch)) {
                 err = PTR_ERR(smoke.batch);
-                goto err_unlock;
+                goto err_free;
         }
 
         cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
@@ -1855,9 +1772,7 @@ err_ctx:
 
 err_batch:
         i915_gem_object_put(smoke.batch);
-err_unlock:
-        intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
-        mutex_unlock(&smoke.i915->drm.struct_mutex);
+err_free:
         kfree(smoke.contexts);
 
         return err;
@@ -1995,19 +1910,17 @@ static int live_virtual_engine(void *arg)
         struct intel_gt *gt = &i915->gt;
         enum intel_engine_id id;
         unsigned int class, inst;
-        int err = -ENODEV;
+        int err;
 
         if (USES_GUC_SUBMISSION(i915))
                 return 0;
 
-        mutex_lock(&i915->drm.struct_mutex);
-
         for_each_engine(engine, i915, id) {
                 err = nop_virtual_engine(i915, &engine, 1, 1, 0);
                 if (err) {
                         pr_err("Failed to wrap engine %s: err=%d\n",
                                engine->name, err);
-                        goto out_unlock;
+                        return err;
                 }
         }
 
@@ -2028,17 +1941,15 @@ static int live_virtual_engine(void *arg)
                         err = nop_virtual_engine(i915, siblings, nsibling,
                                                  n, 0);
                         if (err)
-                                goto out_unlock;
+                                return err;
                 }
 
                 err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
                 if (err)
-                        goto out_unlock;
+                        return err;
         }
 
-out_unlock:
-        mutex_unlock(&i915->drm.struct_mutex);
-        return err;
+        return 0;
 }
 
 static int mask_virtual_engine(struct drm_i915_private *i915,
@@ -2117,9 +2028,6 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
         }
 
         err = igt_live_test_end(&t);
-        if (err)
-                goto out;
-
 out:
         if (igt_flush_test(i915))
                 err = -EIO;
@@ -2142,13 +2050,11 @@ static int live_virtual_mask(void *arg)
         struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
         struct intel_gt *gt = &i915->gt;
         unsigned int class, inst;
-        int err = 0;
+        int err;
 
         if (USES_GUC_SUBMISSION(i915))
                 return 0;
 
-        mutex_lock(&i915->drm.struct_mutex);
-
         for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
                 unsigned int nsibling;
 
@@ -2164,12 +2070,10 @@ static int live_virtual_mask(void *arg)
 
                 err = mask_virtual_engine(i915, siblings, nsibling);
                 if (err)
-                        goto out_unlock;
+                        return err;
         }
 
-out_unlock:
-        mutex_unlock(&i915->drm.struct_mutex);
-        return err;
+        return 0;
 }
 
 static int bond_virtual_engine(struct drm_i915_private *i915,
@@ -2320,13 +2224,11 @@ static int live_virtual_bond(void *arg)
         struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
         struct intel_gt *gt = &i915->gt;
         unsigned int class, inst;
-        int err = 0;
+        int err;
 
         if (USES_GUC_SUBMISSION(i915))
                 return 0;
 
-        mutex_lock(&i915->drm.struct_mutex);
-
         for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
                 const struct phase *p;
                 int nsibling;
@@ -2349,14 +2251,12 @@ static int live_virtual_bond(void *arg)
                         if (err) {
                                 pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
                                        __func__, p->name, class, nsibling, err);
-                                goto out_unlock;
+                                return err;
                         }
                 }
         }
 
-out_unlock:
-        mutex_unlock(&i915->drm.struct_mutex);
-        return err;
+        return 0;
 }
 
 int intel_execlists_live_selftests(struct drm_i915_private *i915)
@@ -708,9 +708,7 @@ static int live_dirty_whitelist(void *arg)
 
         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-        mutex_unlock(&i915->drm.struct_mutex);
         file = mock_file(i915);
-        mutex_lock(&i915->drm.struct_mutex);
         if (IS_ERR(file)) {
                 err = PTR_ERR(file);
                 goto out_rpm;
@@ -732,9 +730,7 @@ static int live_dirty_whitelist(void *arg)
         }
 
 out_file:
-        mutex_unlock(&i915->drm.struct_mutex);
         mock_file_free(i915, file);
-        mutex_lock(&i915->drm.struct_mutex);
 out_rpm:
         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
         return err;
@@ -1274,14 +1270,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
                 SUBTEST(live_gpu_reset_workarounds),
                 SUBTEST(live_engine_reset_workarounds),
         };
-        int err;
 
         if (intel_gt_is_wedged(&i915->gt))
                 return 0;
 
-        mutex_lock(&i915->drm.struct_mutex);
-        err = i915_subtests(tests, i915);
-        mutex_unlock(&i915->drm.struct_mutex);
-
-        return err;
+        return i915_subtests(tests, i915);
 }
@@ -116,7 +116,6 @@ static int igt_guc_clients(void *args)
         int err = 0;
 
         GEM_BUG_ON(!HAS_GT_UC(dev_priv));
-        mutex_lock(&dev_priv->drm.struct_mutex);
         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 
         guc = &dev_priv->gt.uc.guc;
@@ -190,7 +189,6 @@ out:
         guc_clients_enable(guc);
 unlock:
         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-        mutex_unlock(&dev_priv->drm.struct_mutex);
         return err;
 }
 
@@ -208,7 +206,6 @@ static int igt_guc_doorbells(void *arg)
         u16 db_id;
 
         GEM_BUG_ON(!HAS_GT_UC(dev_priv));
-        mutex_lock(&dev_priv->drm.struct_mutex);
         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 
         guc = &dev_priv->gt.uc.guc;
@@ -299,7 +296,6 @@ out:
         }
 unlock:
         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-        mutex_unlock(&dev_priv->drm.struct_mutex);
         return err;
 }
@@ -1797,7 +1797,6 @@ static int igt_cs_tlb(void *arg)
         if (IS_ERR(file))
                 return PTR_ERR(file);
 
-        mutex_lock(&i915->drm.struct_mutex);
         ctx = live_context(i915, file);
         if (IS_ERR(ctx)) {
                 err = PTR_ERR(ctx);
@@ -2020,7 +2019,6 @@ out_put_bbe:
 out_vm:
         i915_vm_put(vm);
 out_unlock:
-        mutex_unlock(&i915->drm.struct_mutex);
         mock_file_free(i915, file);
         return err;
 }