drm/i915: Move intel_lrc_context_pin() to avoid the forward declaration

Just a simple move to avoid a forward declaration, though the diff likes
to present itself as a move of intel_logical_ring_alloc_request_extras()
in the opposite direction.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161218153724.8439-2-chris@chris-wilson.co.uk
commit ef11c01db4
parent 81147b07f2
Author: Chris Wilson
Date:   2016-12-18 15:37:19 +00:00

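As the commit message notes, git presents this change as a move of intel_logical_ring_alloc_request_extras() rather than of the pin helper. A sufficiently recent git (the --color-moved diff option postdates this 2016 commit) can highlight the relocated block when viewing the change, which makes the direction of the move easier to follow:

	git show --color-moved ef11c01db4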

@@ -230,8 +230,6 @@ enum {
 
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                                             struct intel_engine_cs *engine);
-static int intel_lr_context_pin(struct i915_gem_context *ctx,
-                                struct intel_engine_cs *engine);
 static void execlists_init_reg_state(u32 *reg_state,
                                      struct i915_gem_context *ctx,
                                      struct intel_engine_cs *engine,
@@ -774,71 +772,6 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 	/* XXX Do we need to preempt to make room for us and our deps? */
 }
 
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
-{
-	struct intel_engine_cs *engine = request->engine;
-	struct intel_context *ce = &request->ctx->engine[engine->id];
-	int ret;
-
-	/* Flush enough space to reduce the likelihood of waiting after
-	 * we start building the request - in which case we will just
-	 * have to repeat work.
-	 */
-	request->reserved_space += EXECLISTS_REQUEST_SIZE;
-
-	if (!ce->state) {
-		ret = execlists_context_deferred_alloc(request->ctx, engine);
-		if (ret)
-			return ret;
-	}
-
-	request->ring = ce->ring;
-
-	ret = intel_lr_context_pin(request->ctx, engine);
-	if (ret)
-		return ret;
-
-	if (i915.enable_guc_submission) {
-		/*
-		 * Check that the GuC has space for the request before
-		 * going any further, as the i915_add_request() call
-		 * later on mustn't fail ...
-		 */
-		ret = i915_guc_wq_reserve(request);
-		if (ret)
-			goto err_unpin;
-	}
-
-	ret = intel_ring_begin(request, 0);
-	if (ret)
-		goto err_unreserve;
-
-	if (!ce->initialised) {
-		ret = engine->init_context(request);
-		if (ret)
-			goto err_unreserve;
-
-		ce->initialised = true;
-	}
-
-	/* Note that after this point, we have committed to using
-	 * this request as it is being used to both track the
-	 * state of engine initialisation and liveness of the
-	 * golden renderstate above. Think twice before you try
-	 * to cancel/unwind this request now.
-	 */
-
-	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
-	return 0;
-
-err_unreserve:
-	if (i915.enable_guc_submission)
-		i915_guc_wq_unreserve(request);
-err_unpin:
-	intel_lr_context_unpin(request->ctx, engine);
-	return ret;
-}
-
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
                                 struct intel_engine_cs *engine)
 {
@@ -911,6 +844,71 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 	i915_gem_context_put(ctx);
 }
 
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
+{
+	struct intel_engine_cs *engine = request->engine;
+	struct intel_context *ce = &request->ctx->engine[engine->id];
+	int ret;
+
+	/* Flush enough space to reduce the likelihood of waiting after
+	 * we start building the request - in which case we will just
+	 * have to repeat work.
+	 */
+	request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+	if (!ce->state) {
+		ret = execlists_context_deferred_alloc(request->ctx, engine);
+		if (ret)
+			return ret;
+	}
+
+	request->ring = ce->ring;
+
+	ret = intel_lr_context_pin(request->ctx, engine);
+	if (ret)
+		return ret;
+
+	if (i915.enable_guc_submission) {
+		/*
+		 * Check that the GuC has space for the request before
+		 * going any further, as the i915_add_request() call
+		 * later on mustn't fail ...
+		 */
+		ret = i915_guc_wq_reserve(request);
+		if (ret)
+			goto err_unpin;
+	}
+
+	ret = intel_ring_begin(request, 0);
+	if (ret)
+		goto err_unreserve;
+
+	if (!ce->initialised) {
+		ret = engine->init_context(request);
+		if (ret)
+			goto err_unreserve;
+
+		ce->initialised = true;
+	}
+
+	/* Note that after this point, we have committed to using
+	 * this request as it is being used to both track the
+	 * state of engine initialisation and liveness of the
+	 * golden renderstate above. Think twice before you try
+	 * to cancel/unwind this request now.
+	 */
+
+	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+	return 0;
+
+err_unreserve:
+	if (i915.enable_guc_submission)
+		i915_guc_wq_unreserve(request);
+err_unpin:
+	intel_lr_context_unpin(request->ctx, engine);
+	return ret;
+}
+
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;