drm/i915: Update move_to_gpu() to take a request structure

The plan is to pass requests around as the basic submission tracking structure
rather than rings and contexts. This patch updates the move_to_gpu() code paths.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
John Harrison 2015-05-29 17:43:32 +01:00 committed by Daniel Vetter
parent 95c24161cd
commit 535fbe8233
2 changed files with 11 additions and 13 deletions
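
The shape of the change, for readers skimming the hunks below: helpers that used to take an engine (ring), and in the execlists path also a context, now take the request and look both up through it. A minimal standalone C sketch of that pattern follows; the struct and function names (engine, context, request, move_to_gpu_*) are simplified stand-ins for illustration, not the real i915 definitions.

#include <stdio.h>

/* Simplified stand-ins for the real i915 structures. */
struct engine  { const char *name; };
struct context { int id; };

/* The request bundles the state that submission code needs. */
struct request {
        struct engine  *ring;
        struct context *ctx;
};

/* Old style: callers had to carry the engine (and context) explicitly. */
static int move_to_gpu_old(struct engine *ring, struct context *ctx)
{
        printf("flush on %s, ctx %d\n", ring->name, ctx->id);
        return 0;
}

/* New style, as in this patch: pass the request, derive ring/ctx from it. */
static int move_to_gpu_new(struct request *req)
{
        printf("flush on %s, ctx %d\n", req->ring->name, req->ctx->id);
        return 0;
}

int main(void)
{
        struct engine rcs = { "render" };
        struct context ctx = { 1 };
        struct request req = { &rcs, &ctx };

        move_to_gpu_old(&rcs, &ctx);    /* before */
        move_to_gpu_new(&req);          /* after  */
        return 0;
}

The point of the series is that call sites carry a single handle, so later patches can hang more submission-tracking state off the request without touching every helper's signature.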

drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -891,10 +891,10 @@ err:
 }
 
 static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
                                 struct list_head *vmas)
 {
-        const unsigned other_rings = ~intel_ring_flag(ring);
+        const unsigned other_rings = ~intel_ring_flag(req->ring);
         struct i915_vma *vma;
         uint32_t flush_domains = 0;
         bool flush_chipset = false;
@@ -904,7 +904,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
                 struct drm_i915_gem_object *obj = vma->obj;
 
                 if (obj->active & other_rings) {
-                        ret = i915_gem_object_sync(obj, ring);
+                        ret = i915_gem_object_sync(obj, req->ring);
                         if (ret)
                                 return ret;
                 }
@@ -916,7 +916,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
         }
 
         if (flush_chipset)
-                i915_gem_chipset_flush(ring->dev);
+                i915_gem_chipset_flush(req->ring->dev);
 
         if (flush_domains & I915_GEM_DOMAIN_GTT)
                 wmb();
@@ -924,7 +924,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
         /* Unconditionally invalidate gpu caches and ensure that we do flush
          * any residual writes from the previous batch.
          */
-        return intel_ring_invalidate_all_caches(ring);
+        return intel_ring_invalidate_all_caches(req->ring);
 }
 
 static bool
@@ -1246,7 +1246,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
                 }
         }
 
-        ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+        ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
         if (ret)
                 goto error;

drivers/gpu/drm/i915/intel_lrc.c

@@ -624,12 +624,10 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
         return 0;
 }
 
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
-                                 struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
                                  struct list_head *vmas)
 {
-        struct intel_engine_cs *ring = ringbuf->ring;
-        const unsigned other_rings = ~intel_ring_flag(ring);
+        const unsigned other_rings = ~intel_ring_flag(req->ring);
         struct i915_vma *vma;
         uint32_t flush_domains = 0;
         bool flush_chipset = false;
@@ -639,7 +637,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
                 struct drm_i915_gem_object *obj = vma->obj;
 
                 if (obj->active & other_rings) {
-                        ret = i915_gem_object_sync(obj, ring);
+                        ret = i915_gem_object_sync(obj, req->ring);
                         if (ret)
                                 return ret;
                 }
@@ -656,7 +654,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
         /* Unconditionally invalidate gpu caches and ensure that we do flush
          * any residual writes from the previous batch.
          */
-        return logical_ring_invalidate_all_caches(ringbuf, ctx);
+        return logical_ring_invalidate_all_caches(req->ringbuf, req->ctx);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -918,7 +916,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
                 return -EINVAL;
         }
 
-        ret = execlists_move_to_gpu(ringbuf, params->ctx, vmas);
+        ret = execlists_move_to_gpu(params->request, vmas);
         if (ret)
                 return ret;