drm/i915/gem: Make i915_gem_object_flush_write_domain() static

flush_write_domain() is only used within the GEM domain management code,
so move it to i915_gem_domain.c and drop the export.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210119144912.12653-5-chris@chris-wilson.co.uk
This commit is contained in:
Chris Wilson 2021-01-19 14:49:11 +00:00
parent 8f47c8c3b0
commit d60d3374d3
3 changed files with 52 additions and 57 deletions

View File

@ -5,6 +5,7 @@
*/ */
#include "display/intel_frontbuffer.h" #include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_gem_clflush.h" #include "i915_gem_clflush.h"
@ -15,13 +16,58 @@
#include "i915_gem_lmem.h" #include "i915_gem_lmem.h"
#include "i915_gem_mman.h" #include "i915_gem_mman.h"
/*
 * gpu_write_needs_clflush - does a GPU (render) write to @obj leave
 * stale data in the CPU caches?
 *
 * The two cache levels the code treats as coherent for GPU writes
 * (I915_CACHE_NONE and I915_CACHE_WT) need no clflush; any other
 * cache level does.
 */
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return obj->cache_level != I915_CACHE_NONE &&
	       obj->cache_level != I915_CACHE_WT;
}
/*
 * flush_write_domain - flush outstanding CPU-side writes for @obj
 * @obj: the GEM object whose write domain is to be flushed
 * @flush_domains: mask of write domains the caller wants flushed
 *
 * If the object's current write domain is within @flush_domains, make
 * any pending writes visible (per-domain flush below) and clear
 * obj->write_domain. No-op when the write domain is not in the mask.
 */
static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
struct i915_vma *vma;
/* Caller must hold the object lock. */
assert_object_held(obj);
if (!(obj->write_domain & flush_domains))
return;
switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
/*
 * Flush GGTT writes: for each GGTT binding that still has the
 * ggtt-write flag set, flush that vm's GGTT writes, then signal
 * a CPU-origin frontbuffer flush for display tracking.
 */
spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
if (i915_vma_unset_ggtt_write(vma))
intel_gt_flush_ggtt_writes(vma->vm->gt);
}
spin_unlock(&obj->vma.lock);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
break;
case I915_GEM_DOMAIN_WC:
/* Write memory barrier to drain write-combining buffers. */
wmb();
break;
case I915_GEM_DOMAIN_CPU:
/* Synchronously clflush any dirty CPU cachelines. */
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
break;
case I915_GEM_DOMAIN_RENDER:
/*
 * GPU writes bypass the CPU cache for non-coherent cache
 * levels; remember that a clflush is needed later rather
 * than flushing now.
 */
if (gpu_write_needs_clflush(obj))
obj->cache_dirty = true;
break;
}
obj->write_domain = 0;
}
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{ {
/* /*
* We manually flush the CPU domain so that we can override and * We manually flush the CPU domain so that we can override and
* force the flush for the display, and perform it asynchronously. * force the flush for the display, and perform it asynchronously.
*/ */
i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
if (obj->cache_dirty) if (obj->cache_dirty)
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
obj->write_domain = 0; obj->write_domain = 0;
@ -80,7 +126,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
if (ret) if (ret)
return ret; return ret;
i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
/* Serialise direct access to this object with the barriers for /* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the * coherent writes from the GPU, by effectively invalidating the
@ -141,7 +187,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret) if (ret)
return ret; return ret;
i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
/* Serialise direct access to this object with the barriers for /* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the * coherent writes from the GPU, by effectively invalidating the
@ -451,7 +497,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret) if (ret)
return ret; return ret;
i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* Flush the CPU cache if it's still invalid. */ /* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
@ -619,7 +665,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
goto out; goto out;
} }
i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the cpu read domain, set ourself into the gtt /* If we're not in the cpu read domain, set ourself into the gtt
* read domain and manually flush cachelines (if required). This * read domain and manually flush cachelines (if required). This
@ -670,7 +716,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
goto out; goto out;
} }
i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the cpu write domain, set ourself into the /* If we're not in the cpu write domain, set ourself into the
* gtt write domain and manually flush cachelines (as required). * gtt write domain and manually flush cachelines (as required).

View File

@ -25,7 +25,6 @@
#include <linux/sched/mm.h> #include <linux/sched/mm.h>
#include "display/intel_frontbuffer.h" #include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_gem_clflush.h" #include "i915_gem_clflush.h"
#include "i915_gem_context.h" #include "i915_gem_context.h"
@ -313,52 +312,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
queue_work(i915->wq, &i915->mm.free_work); queue_work(i915->wq, &i915->mm.free_work);
} }
/*
 * gpu_write_needs_clflush - true when GPU writes to @obj can leave
 * stale data in the CPU caches, i.e. the object's cache level is
 * neither I915_CACHE_NONE nor I915_CACHE_WT.
 */
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
return !(obj->cache_level == I915_CACHE_NONE ||
obj->cache_level == I915_CACHE_WT);
}
/*
 * i915_gem_object_flush_write_domain - flush outstanding CPU-side
 * writes for @obj
 * @obj: the GEM object whose write domain is to be flushed
 * @flush_domains: mask of write domains the caller wants flushed
 *
 * If the object's current write domain is within @flush_domains, make
 * any pending writes visible (per-domain flush below) and clear
 * obj->write_domain. No-op when the write domain is not in the mask.
 */
void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
unsigned int flush_domains)
{
struct i915_vma *vma;
/* Caller must hold the object lock. */
assert_object_held(obj);
if (!(obj->write_domain & flush_domains))
return;
switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
/*
 * Flush GGTT writes: for each GGTT binding that still has the
 * ggtt-write flag set, flush that vm's GGTT writes, then signal
 * a CPU-origin frontbuffer flush for display tracking.
 */
spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
if (i915_vma_unset_ggtt_write(vma))
intel_gt_flush_ggtt_writes(vma->vm->gt);
}
spin_unlock(&obj->vma.lock);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
break;
case I915_GEM_DOMAIN_WC:
/* Write memory barrier to drain write-combining buffers. */
wmb();
break;
case I915_GEM_DOMAIN_CPU:
/* Synchronously clflush any dirty CPU cachelines. */
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
break;
case I915_GEM_DOMAIN_RENDER:
/*
 * GPU writes bypass the CPU cache for non-coherent cache
 * levels; remember that a clflush is needed later rather
 * than flushing now.
 */
if (gpu_write_needs_clflush(obj))
obj->cache_dirty = true;
break;
}
obj->write_domain = 0;
}
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
enum fb_op_origin origin) enum fb_op_origin origin)
{ {

View File

@ -427,10 +427,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj); void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
unsigned int flush_domains);
int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush); unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,