drm/i915/selftests: Prepare execlists and lrc selftests for obj->mm.lock removal
Convert normal functions to unlocked versions where needed.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-54-maarten.lankhorst@linux.intel.com
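For context, the _unlocked variants used throughout the diff below are thin wrappers that take the object's dma-resv lock around the plain call, so the selftests no longer rely on obj->mm.lock. A minimal sketch of the wrapper shape, assuming the i915_gem_object_lock()/i915_gem_object_unlock() helpers from this series (the real definition lives in the i915 GEM code and may differ in detail):

/* Sketch of the _unlocked wrapper pattern, not the verbatim i915 helper:
 * bracket the existing pin_map call with the object (dma-resv) lock that
 * obj->mm.lock used to provide.
 */
void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);	/* takes the dma-resv lock */
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}

The callers converted below are exactly those that previously called i915_gem_object_pin_map() without holding the object lock.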
commit e09e903a6e
parent 17b7ab92be
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -989,7 +989,7 @@ static int live_timeslice_preempt(void *arg)
 		goto err_obj;
 	}
 
-	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
 		goto err_obj;
@@ -1297,7 +1297,7 @@ static int live_timeslice_queue(void *arg)
 		goto err_obj;
 	}
 
-	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
 		goto err_obj;
@@ -1544,7 +1544,7 @@ static int live_busywait_preempt(void *arg)
 		goto err_ctx_lo;
 	}
 
-	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(map)) {
 		err = PTR_ERR(map);
 		goto err_obj;
@@ -2714,7 +2714,7 @@ static int create_gang(struct intel_engine_cs *engine,
 	if (err)
 		goto err_obj;
 
-	cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(cs)) {
 		err = PTR_ERR(cs);
 		goto err_obj;
@@ -2997,7 +2997,7 @@ static int live_preempt_gang(void *arg)
 	 * it will terminate the next lowest spinner until there
 	 * are no more spinners and the gang is complete.
 	 */
-	cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+	cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC);
 	if (!IS_ERR(cs)) {
 		*cs = 0;
 		i915_gem_object_unpin_map(rq->batch->obj);
@@ -3062,7 +3062,7 @@ create_gpr_user(struct intel_engine_cs *engine,
 		return ERR_PTR(err);
 	}
 
-	cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(cs)) {
 		i915_vma_put(vma);
 		return ERR_CAST(cs);
@@ -3269,7 +3269,7 @@ static int live_preempt_user(void *arg)
 	if (IS_ERR(global))
 		return PTR_ERR(global);
 
-	result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+	result = i915_gem_object_pin_map_unlocked(global->obj, I915_MAP_WC);
 	if (IS_ERR(result)) {
 		i915_vma_unpin_and_release(&global, 0);
 		return PTR_ERR(result);
@@ -3658,7 +3658,7 @@ static int live_preempt_smoke(void *arg)
 		goto err_free;
 	}
 
-	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+	cs = i915_gem_object_pin_map_unlocked(smoke.batch, I915_MAP_WB);
 	if (IS_ERR(cs)) {
 		err = PTR_ERR(cs);
 		goto err_batch;
@@ -4263,7 +4263,7 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 		goto out_end;
 	}
 
-	cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+	cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
 	if (IS_ERR(cs)) {
 		err = PTR_ERR(cs);
 		goto out_end;
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -627,7 +627,7 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
 		goto err_rq;
 	}
 
-	cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+	cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
 	if (IS_ERR(cs)) {
 		err = PTR_ERR(cs);
 		goto err_rq;
@@ -921,7 +921,7 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
 	if (IS_ERR(batch))
 		return batch;
 
-	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
 	if (IS_ERR(cs)) {
 		i915_vma_put(batch);
 		return ERR_CAST(cs);
@@ -1085,7 +1085,7 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
 	if (IS_ERR(batch))
 		return batch;
 
-	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
 	if (IS_ERR(cs)) {
 		i915_vma_put(batch);
 		return ERR_CAST(cs);
@@ -1199,29 +1199,29 @@ static int compare_isolation(struct intel_engine_cs *engine,
 	u32 *defaults;
 	int err = 0;
 
-	A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
+	A[0] = i915_gem_object_pin_map_unlocked(ref[0]->obj, I915_MAP_WC);
 	if (IS_ERR(A[0]))
 		return PTR_ERR(A[0]);
 
-	A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC);
+	A[1] = i915_gem_object_pin_map_unlocked(ref[1]->obj, I915_MAP_WC);
 	if (IS_ERR(A[1])) {
 		err = PTR_ERR(A[1]);
 		goto err_A0;
 	}
 
-	B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC);
+	B[0] = i915_gem_object_pin_map_unlocked(result[0]->obj, I915_MAP_WC);
 	if (IS_ERR(B[0])) {
 		err = PTR_ERR(B[0]);
 		goto err_A1;
 	}
 
-	B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC);
+	B[1] = i915_gem_object_pin_map_unlocked(result[1]->obj, I915_MAP_WC);
 	if (IS_ERR(B[1])) {
 		err = PTR_ERR(B[1]);
 		goto err_B0;
 	}
 
-	lrc = i915_gem_object_pin_map(ce->state->obj,
+	lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
 				      i915_coherent_map_type(engine->i915));
 	if (IS_ERR(lrc)) {
 		err = PTR_ERR(lrc);