drm/i915: Mark up protected uses of 'i915_request_completed'
When we know that we are inside the timeline mutex, or inside the submission flow (under active.lock or the holder's rcu lock), we know that the rq->hwsp is stable and we can use the simpler direct version.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210114135612.13210-1-chris@chris-wilson.co.uk
commit 163433e5c5
parent d263dfa7d2
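For readers following the substitutions below, here is a rough sketch of the two flavours of the completion check. It is paraphrased from drivers/gpu/drm/i915/i915_request.h of that era, not a verbatim copy, and the exact bodies may differ: the plain i915_request_completed() must take rcu_read_lock() around the seqno read because rq->hwsp_seqno may be re-pointed while the request's HWSP cacheline is recycled, whereas the double-underscore __i915_request_is_complete() reads the seqno directly and is only safe where the caller already guarantees the HWSP is stable (timeline mutex held, or inside the submission flow under active.lock or the holder's RCU lock).

/*
 * Illustrative sketch only -- assumes the usual i915 helpers
 * (i915_seqno_passed, i915_request_signaled, READ_ONCE) and is
 * paraphrased rather than copied from i915_request.h.
 */
static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);

	return READ_ONCE(*hwsp);
}

static inline bool __i915_request_is_complete(const struct i915_request *rq)
{
	/* Direct read: the caller must keep rq->hwsp stable. */
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	bool ret;

	if (i915_request_signaled(rq))
		return true;

	rcu_read_lock(); /* the HWSP cacheline may be recycled under us */
	ret = __i915_request_is_complete(rq);
	rcu_read_unlock();

	return ret;
}

Every hunk below sits in a context that already provides that stability, which is why the lighter-weight form is correct there.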
@@ -408,7 +408,7 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active)
 	}
 
 	if (i915_request_is_active(rq)) {
-		if (!i915_request_completed(rq))
+		if (!__i915_request_is_complete(rq))
 			*active = locked;
 		ret = true;
 	}
@@ -517,8 +517,8 @@ static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
 		list_for_each_entry_rcu(rq, &ce->signals, signal_link)
 			drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
 				   rq->fence.context, rq->fence.seqno,
-				   i915_request_completed(rq) ? "!" :
-				   i915_request_started(rq) ? "*" :
+				   __i915_request_is_complete(rq) ? "!" :
+				   __i915_request_has_started(rq) ? "*" :
 				   "",
 				   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
 	}
@@ -1811,7 +1811,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 		struct intel_timeline *tl = request->context->timeline;
 
 		list_for_each_entry_from_reverse(request, &tl->requests, link) {
-			if (i915_request_completed(request))
+			if (__i915_request_is_complete(request))
 				break;
 
 			active = request;
@@ -1822,10 +1822,10 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 		return active;
 
 	list_for_each_entry(request, &engine->active.requests, sched.link) {
-		if (i915_request_completed(request))
+		if (__i915_request_is_complete(request))
 			continue;
 
-		if (!i915_request_started(request))
+		if (!__i915_request_has_started(request))
 			continue;
 
 		/* More than one preemptible request may match! */
@@ -3291,7 +3291,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 
 	old = fetch_and_zero(&ve->request);
 	if (old) {
-		GEM_BUG_ON(!i915_request_completed(old));
+		GEM_BUG_ON(!__i915_request_is_complete(old));
 		__i915_request_submit(old);
 		i915_request_put(old);
 	}
@@ -3568,7 +3568,7 @@ static void virtual_submit_request(struct i915_request *rq)
 	}
 
 	if (ve->request) { /* background completion from preempt-to-busy */
-		GEM_BUG_ON(!i915_request_completed(ve->request));
+		GEM_BUG_ON(!__i915_request_is_complete(ve->request));
 		__i915_request_submit(ve->request);
 		i915_request_put(ve->request);
 	}
@@ -151,8 +151,7 @@ static void mark_innocent(struct i915_request *rq)
 void __i915_request_reset(struct i915_request *rq, bool guilty)
 {
 	RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
-
-	GEM_BUG_ON(i915_request_completed(rq));
+	GEM_BUG_ON(__i915_request_is_complete(rq));
 
 	rcu_read_lock(); /* protect the GEM context */
 	if (guilty) {
@@ -389,12 +389,14 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
 
 	rq = NULL;
 	spin_lock_irqsave(&engine->active.lock, flags);
+	rcu_read_lock();
 	list_for_each_entry(pos, &engine->active.requests, sched.link) {
-		if (!i915_request_completed(pos)) {
+		if (!__i915_request_is_complete(pos)) {
 			rq = pos;
 			break;
 		}
 	}
+	rcu_read_unlock();
 
 	/*
 	 * The guilty request will get skipped on a hung engine.
@@ -582,11 +582,11 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 
 	rcu_read_lock();
 	cl = rcu_dereference(from->hwsp_cacheline);
-	if (i915_request_completed(from)) /* confirm cacheline is valid */
+	if (i915_request_signaled(from)) /* confirm cacheline is valid */
 		goto unlock;
 	if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
 		goto unlock; /* seqno wrapped and completed! */
-	if (unlikely(i915_request_completed(from)))
+	if (unlikely(__i915_request_is_complete(from)))
 		goto release;
 	rcu_read_unlock();
 
@@ -276,7 +276,7 @@ static void remove_from_engine(struct i915_request *rq)
 
 bool i915_request_retire(struct i915_request *rq)
 {
-	if (!i915_request_completed(rq))
+	if (!__i915_request_is_complete(rq))
 		return false;
 
 	RQ_TRACE(rq, "\n");
@@ -342,8 +342,7 @@ void i915_request_retire_upto(struct i915_request *rq)
 	struct i915_request *tmp;
 
 	RQ_TRACE(rq, "\n");
-
-	GEM_BUG_ON(!i915_request_completed(rq));
+	GEM_BUG_ON(!__i915_request_is_complete(rq));
 
 	do {
 		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
|
||||
* dropped upon retiring. (Otherwise if resubmit a *retired*
|
||||
* request, this would be a horrible use-after-free.)
|
||||
*/
|
||||
if (i915_request_completed(request))
|
||||
if (__i915_request_is_complete(request))
|
||||
goto xfer;
|
||||
|
||||
if (unlikely(intel_context_is_banned(request->context)))
|
||||
@@ -652,7 +651,7 @@ void __i915_request_unsubmit(struct i915_request *request)
 		i915_request_cancel_breadcrumb(request);
 
 	/* We've already spun, don't charge on resubmitting. */
-	if (request->sched.semaphores && i915_request_started(request))
+	if (request->sched.semaphores && __i915_request_has_started(request))
 		request->sched.semaphores = 0;
 
 	/*
@@ -864,7 +863,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	RCU_INIT_POINTER(rq->timeline, tl);
 	RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
 	rq->hwsp_seqno = tl->hwsp_seqno;
-	GEM_BUG_ON(i915_request_completed(rq));
+	GEM_BUG_ON(__i915_request_is_complete(rq));
 
 	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
 
@@ -978,7 +977,7 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
 		struct i915_request *prev;
 
 		/* Confirm signal has not been retired, the link is valid */
-		if (unlikely(i915_request_started(signal)))
+		if (unlikely(__i915_request_has_started(signal)))
 			break;
 
 		/* Is signal the earliest request on its timeline? */
@@ -1520,7 +1519,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 	 */
 	prev = to_request(__i915_active_fence_set(&timeline->last_request,
 						  &rq->fence));
-	if (prev && !i915_request_completed(prev)) {
+	if (prev && !__i915_request_is_complete(prev)) {
 		/*
 		 * The requests are supposed to be kept in order. However,
 		 * we need to be wary in case the timeline->last_request
|
||||
|
||||
static const char *run_status(const struct i915_request *rq)
|
||||
{
|
||||
if (i915_request_completed(rq))
|
||||
if (__i915_request_is_complete(rq))
|
||||
return "!";
|
||||
|
||||
if (i915_request_started(rq))
|
||||
if (__i915_request_has_started(rq))
|
||||
return "*";
|
||||
|
||||
if (!i915_sw_fence_signaled(&rq->semaphore))
|
||||
|
@@ -520,7 +520,7 @@ void i915_request_show_with_schedule(struct drm_printer *m,
 		if (signaler->timeline == rq->timeline)
 			continue;
 
-		if (i915_request_completed(signaler))
+		if (__i915_request_is_complete(signaler))
 			continue;
 
 		i915_request_show(m, signaler, prefix, indent + 2);