mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-23 02:54:32 +08:00
drm/i915: Avoid waitboosting on the active request
Watching a light workload on Baytrail (running glxgears and a 1080p decode), instead of the system remaining at low frequency, the glxgears would regularly trigger waitboosting after which it would have to spend a few seconds throttling back down. In this case, the waitboosting is counterproductive as the minimal wait for glxgears doesn't prevent it from functioning correctly and delivering frames on time. In this case, glxgears happens to almost always be waiting on the current request, which we already expect to complete quickly (see i915_spin_request) and so avoiding the waitboost on the active request and spinning instead provides the best latency without overcommitting to upclocking. However, if the system falls behind we still force the waitboost. Similarly, we will also trigger upclocking if we detect the system is not delivering frames on time - again using a mechanism that tries to detect a miss and not preemptively upclock. v2: Also skip boosting after a missed vblank if the desired request is already active. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com> Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20180118131609.16574-1-chris@chris-wilson.co.uk
This commit is contained in:
parent
b6c51c3e28
commit
e9af4ea2b9
@ -369,7 +369,8 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
|
||||
if (i915_gem_request_completed(rq))
|
||||
goto out;
|
||||
|
||||
/* This client is about to stall waiting for the GPU. In many cases
|
||||
/*
|
||||
* This client is about to stall waiting for the GPU. In many cases
|
||||
* this is undesirable and limits the throughput of the system, as
|
||||
* many clients cannot continue processing user input/output whilst
|
||||
* blocked. RPS autotuning may take tens of milliseconds to respond
|
||||
@ -384,11 +385,9 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
|
||||
* forcing the clocks too high for the whole system, we only allow
|
||||
* each client to waitboost once in a busy period.
|
||||
*/
|
||||
if (rps_client) {
|
||||
if (rps_client && !i915_gem_request_started(rq)) {
|
||||
if (INTEL_GEN(rq->i915) >= 6)
|
||||
gen6_rps_boost(rq, rps_client);
|
||||
else
|
||||
rps_client = NULL;
|
||||
}
|
||||
|
||||
timeout = i915_wait_request(rq, flags, timeout);
|
||||
|
@ -329,6 +329,19 @@ i915_gem_request_completed(const struct drm_i915_gem_request *req)
|
||||
return __i915_gem_request_completed(req, seqno);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
i915_gem_request_started(const struct drm_i915_gem_request *req)
|
||||
{
|
||||
u32 seqno;
|
||||
|
||||
seqno = i915_gem_request_global_seqno(req);
|
||||
if (!seqno)
|
||||
return false;
|
||||
|
||||
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
|
||||
seqno - 1);
|
||||
}
|
||||
|
||||
static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
|
||||
{
|
||||
const struct drm_i915_gem_request *rq =
|
||||
|
@ -12519,7 +12519,13 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
|
||||
struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
|
||||
struct drm_i915_gem_request *rq = wait->request;
|
||||
|
||||
gen6_rps_boost(rq, NULL);
|
||||
/*
|
||||
* If we missed the vblank, but the request is already running it
|
||||
* is reasonable to assume that it will complete before the next
|
||||
* vblank without our intervention, so leave RPS alone.
|
||||
*/
|
||||
if (!i915_gem_request_started(rq))
|
||||
gen6_rps_boost(rq, NULL);
|
||||
i915_gem_request_put(rq);
|
||||
|
||||
drm_crtc_vblank_put(wait->crtc);
|
||||
|
Loading…
Reference in New Issue
Block a user