mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-04 17:44:14 +08:00
drm/i915/gem: Amalgamate clflushes on suspend
When flushing objects larger than the CPU cache it is preferable to use a single wbinvd() rather than overlapping clflush(). At runtime, we avoid wbinvd() due to its system-wide latencies, but during single-threaded suspend, no one will observe the imposed latency and we can opt for the faster wbinvd to clear all objects in a single hit. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Matthew Auld <matthew.auld@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20210119214336.1463-1-chris@chris-wilson.co.uk
This commit is contained in:
parent
95b98f004f
commit
ac05a22cd0
@ -11,6 +11,13 @@
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
#if defined(CONFIG_X86)
|
||||
#include <asm/smp.h>
|
||||
#else
|
||||
#define wbinvd_on_all_cpus() \
|
||||
pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
|
||||
#endif
|
||||
|
||||
void i915_gem_suspend(struct drm_i915_private *i915)
|
||||
{
|
||||
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
|
||||
@ -32,13 +39,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
|
||||
i915_gem_drain_freed_objects(i915);
|
||||
}
|
||||
|
||||
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
|
||||
{
|
||||
return list_first_entry_or_null(list,
|
||||
struct drm_i915_gem_object,
|
||||
mm.link);
|
||||
}
|
||||
|
||||
void i915_gem_suspend_late(struct drm_i915_private *i915)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
@ -48,6 +48,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
|
||||
NULL
|
||||
}, **phase;
|
||||
unsigned long flags;
|
||||
bool flush = false;
|
||||
|
||||
/*
|
||||
* Neither the BIOS, ourselves or any other kernel
|
||||
@ -73,29 +74,15 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
|
||||
|
||||
spin_lock_irqsave(&i915->mm.obj_lock, flags);
|
||||
for (phase = phases; *phase; phase++) {
|
||||
LIST_HEAD(keep);
|
||||
|
||||
while ((obj = first_mm_object(*phase))) {
|
||||
list_move_tail(&obj->mm.link, &keep);
|
||||
|
||||
/* Beware the background _i915_gem_free_objects */
|
||||
if (!kref_get_unless_zero(&obj->base.refcount))
|
||||
continue;
|
||||
|
||||
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
|
||||
|
||||
i915_gem_object_lock(obj, NULL);
|
||||
drm_WARN_ON(&i915->drm,
|
||||
i915_gem_object_set_to_gtt_domain(obj, false));
|
||||
i915_gem_object_unlock(obj);
|
||||
i915_gem_object_put(obj);
|
||||
|
||||
spin_lock_irqsave(&i915->mm.obj_lock, flags);
|
||||
list_for_each_entry(obj, *phase, mm.link) {
|
||||
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
|
||||
flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
|
||||
__start_cpu_write(obj); /* presume auto-hibernate */
|
||||
}
|
||||
|
||||
list_splice_tail(&keep, *phase);
|
||||
}
|
||||
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
|
||||
if (flush)
|
||||
wbinvd_on_all_cpus();
|
||||
}
|
||||
|
||||
void i915_gem_resume(struct drm_i915_private *i915)
|
||||
|
Loading…
Reference in New Issue
Block a user