drm/i915: Move i915_vma_move_to_active() to i915_vma.c
i915_vma_move_to_active() has grown beyond its execbuf origins, and
should take its rightful place in i915_vma.c as a method for i915_vma!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180706103947.15919-4-chris@chris-wilson.co.uk
commit e6bb1d7f1a
parent a523697857
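For context, a minimal caller sketch (hypothetical, not part of this commit) of the API being moved: example_mark_active() and the choice of EXEC_OBJECT_WRITE are assumptions for illustration, while the locking rule and return-value check follow the code in the diff below.

/*
 * Hypothetical caller sketch, assuming the kernel-internal i915 types
 * shown in the diff. Passing EXEC_OBJECT_WRITE installs the request as
 * the exclusive fence via export_fence(); readers omit that flag and
 * get a shared fence instead.
 */
static int example_mark_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	/* i915_vma_move_to_active() asserts struct_mutex is held */
	lockdep_assert_held(&rq->i915->drm.struct_mutex);

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		return err;

	return 0;
}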
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3090,9 +3090,6 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
 }
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_vma_move_to_active(struct i915_vma *vma,
-					 struct i915_request *rq,
-					 unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1868,67 +1868,6 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 	return true;
 }
 
-static void export_fence(struct i915_vma *vma,
-			 struct i915_request *rq,
-			 unsigned int flags)
-{
-	struct reservation_object *resv = vma->resv;
-
-	/*
-	 * Ignore errors from failing to allocate the new fence, we can't
-	 * handle an error right now. Worst case should be missed
-	 * synchronisation leading to rendering corruption.
-	 */
-	reservation_object_lock(resv, NULL);
-	if (flags & EXEC_OBJECT_WRITE)
-		reservation_object_add_excl_fence(resv, &rq->fence);
-	else if (reservation_object_reserve_shared(resv) == 0)
-		reservation_object_add_shared_fence(resv, &rq->fence);
-	reservation_object_unlock(resv);
-}
-
-int i915_vma_move_to_active(struct i915_vma *vma,
-			    struct i915_request *rq,
-			    unsigned int flags)
-{
-	struct drm_i915_gem_object *obj = vma->obj;
-	const unsigned int idx = rq->engine->id;
-
-	lockdep_assert_held(&rq->i915->drm.struct_mutex);
-	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-
-	/*
-	 * Add a reference if we're newly entering the active list.
-	 * The order in which we add operations to the retirement queue is
-	 * vital here: mark_active adds to the start of the callback list,
-	 * such that subsequent callbacks are called first. Therefore we
-	 * add the active reference first and queue for it to be dropped
-	 * *last*.
-	 */
-	if (!i915_vma_is_active(vma))
-		obj->active_count++;
-	i915_vma_set_active(vma, idx);
-	i915_gem_active_set(&vma->last_read[idx], rq);
-	list_move_tail(&vma->vm_link, &vma->vm->active_list);
-
-	obj->write_domain = 0;
-	if (flags & EXEC_OBJECT_WRITE) {
-		obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
-		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
-			i915_gem_active_set(&obj->frontbuffer_write, rq);
-
-		obj->read_domains = 0;
-	}
-	obj->read_domains |= I915_GEM_GPU_DOMAINS;
-
-	if (flags & EXEC_OBJECT_NEEDS_FENCE)
-		i915_gem_active_set(&vma->last_fence, rq);
-
-	export_fence(vma, rq, flags);
-	return 0;
-}
-
 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 {
 	u32 *cs;
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -859,6 +859,67 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
 		list_del(&vma->obj->userfault_link);
 }
 
+static void export_fence(struct i915_vma *vma,
+			 struct i915_request *rq,
+			 unsigned int flags)
+{
+	struct reservation_object *resv = vma->resv;
+
+	/*
+	 * Ignore errors from failing to allocate the new fence, we can't
+	 * handle an error right now. Worst case should be missed
+	 * synchronisation leading to rendering corruption.
+	 */
+	reservation_object_lock(resv, NULL);
+	if (flags & EXEC_OBJECT_WRITE)
+		reservation_object_add_excl_fence(resv, &rq->fence);
+	else if (reservation_object_reserve_shared(resv) == 0)
+		reservation_object_add_shared_fence(resv, &rq->fence);
+	reservation_object_unlock(resv);
+}
+
+int i915_vma_move_to_active(struct i915_vma *vma,
+			    struct i915_request *rq,
+			    unsigned int flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	const unsigned int idx = rq->engine->id;
+
+	lockdep_assert_held(&rq->i915->drm.struct_mutex);
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+	/*
+	 * Add a reference if we're newly entering the active list.
+	 * The order in which we add operations to the retirement queue is
+	 * vital here: mark_active adds to the start of the callback list,
+	 * such that subsequent callbacks are called first. Therefore we
+	 * add the active reference first and queue for it to be dropped
+	 * *last*.
+	 */
+	if (!i915_vma_is_active(vma))
+		obj->active_count++;
+	i915_vma_set_active(vma, idx);
+	i915_gem_active_set(&vma->last_read[idx], rq);
+	list_move_tail(&vma->vm_link, &vma->vm->active_list);
+
+	obj->write_domain = 0;
+	if (flags & EXEC_OBJECT_WRITE) {
+		obj->write_domain = I915_GEM_DOMAIN_RENDER;
+
+		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+			i915_gem_active_set(&obj->frontbuffer_write, rq);
+
+		obj->read_domains = 0;
+	}
+	obj->read_domains |= I915_GEM_GPU_DOMAINS;
+
+	if (flags & EXEC_OBJECT_NEEDS_FENCE)
+		i915_gem_active_set(&vma->last_fence, rq);
+
+	export_fence(vma, rq, flags);
+	return 0;
+}
+
 int i915_vma_unbind(struct i915_vma *vma)
 {
 	unsigned long active;
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -215,6 +215,10 @@ static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
 	return vma->active & BIT(engine);
 }
 
+int __must_check i915_vma_move_to_active(struct i915_vma *vma,
+					 struct i915_request *rq,
+					 unsigned int flags);
+
 static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
 {
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));