2
0
Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-27 06:34:11 +08:00

drm/amdgpu: generalize the scheduler fence

Make it two events, one for the job being scheduled and one when it is finished.

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Christian König 2016-05-20 12:53:52 +02:00 committed by Alex Deucher
parent 0e9d239b8d
commit 6fc1367582
6 changed files with 79 additions and 49 deletions

View File

@@ -85,7 +85,7 @@ static void amdgpu_job_free_resources(struct amdgpu_job *job)
unsigned i;
/* use sched fence if available */
f = job->base.s_fence ? &job->base.s_fence->base : job->fence;
f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
for (i = 0; i < job->num_ibs; ++i)
amdgpu_ib_free(job->adev, &job->ibs[i], f);
@@ -143,7 +143,7 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
int r;
r = amdgpu_vm_grab_id(vm, ring, &job->sync,
&job->base.s_fence->base,
&job->base.s_fence->finished,
&job->vm_id, &job->vm_pd_addr);
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);

View File

@@ -102,7 +102,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__entry->adev = job->adev;
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
__entry->fence = &job->base.s_fence->base;
__entry->fence = &job->base.s_fence->finished;
__entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs;
),
@@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__entry->adev = job->adev;
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
__entry->fence = &job->base.s_fence->base;
__entry->fence = &job->base.s_fence->finished;
__entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs;
),

View File

@@ -26,7 +26,7 @@ TRACE_EVENT(amd_sched_job,
TP_fast_assign(
__entry->entity = sched_job->s_entity;
__entry->sched_job = sched_job;
__entry->fence = &sched_job->s_fence->base;
__entry->fence = &sched_job->s_fence->finished;
__entry->name = sched_job->sched->name;
__entry->job_count = kfifo_len(
&sched_job->s_entity->job_queue) / sizeof(sched_job);
@@ -46,7 +46,7 @@ TRACE_EVENT(amd_sched_process_job,
),
TP_fast_assign(
__entry->fence = &fence->base;
__entry->fence = &fence->finished;
),
TP_printk("fence=%p signaled", __entry->fence)
);

View File

@@ -140,7 +140,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
return r;
atomic_set(&entity->fence_seq, 0);
entity->fence_context = fence_context_alloc(1);
entity->fence_context = fence_context_alloc(2);
return 0;
}
@@ -251,17 +251,21 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
s_fence = to_amd_sched_fence(fence);
if (s_fence && s_fence->sched == sched) {
/* Fence is from the same scheduler */
if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
/* Ignore it when it is already scheduled */
fence_put(entity->dependency);
return false;
}
/* Wait for fence to be scheduled */
entity->cb.func = amd_sched_entity_clear_dep;
list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
return true;
/*
* Fence is from the same scheduler, only need to wait for
* it to be scheduled
*/
fence = fence_get(&s_fence->scheduled);
fence_put(entity->dependency);
entity->dependency = fence;
if (!fence_add_callback(fence, &entity->cb,
amd_sched_entity_clear_dep))
return true;
/* Ignore it when it is already scheduled */
fence_put(fence);
return false;
}
if (!fence_add_callback(entity->dependency, &entity->cb,
@@ -389,7 +393,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
struct amd_sched_entity *entity = sched_job->s_entity;
trace_amd_sched_job(sched_job);
fence_add_callback(&sched_job->s_fence->base, &sched_job->finish_cb,
fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
@@ -412,7 +416,7 @@ int amd_sched_job_init(struct amd_sched_job *job,
INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
if (fence)
*fence = &job->s_fence->base;
*fence = &job->s_fence->finished;
return 0;
}
@@ -463,10 +467,10 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
struct amd_gpu_scheduler *sched = s_fence->sched;
atomic_dec(&sched->hw_rq_count);
amd_sched_fence_signal(s_fence);
amd_sched_fence_finished(s_fence);
trace_amd_sched_process_job(s_fence);
fence_put(&s_fence->base);
fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}

View File

@@ -27,8 +27,6 @@
#include <linux/kfifo.h>
#include <linux/fence.h>
#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS
struct amd_gpu_scheduler;
struct amd_sched_rq;
@@ -68,9 +66,9 @@ struct amd_sched_rq {
};
struct amd_sched_fence {
struct fence base;
struct fence scheduled;
struct fence finished;
struct fence_cb cb;
struct list_head scheduled_cb;
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
@@ -86,14 +84,15 @@ struct amd_sched_job {
struct delayed_work work_tdr;
};
extern const struct fence_ops amd_sched_fence_ops;
extern const struct fence_ops amd_sched_fence_ops_scheduled;
extern const struct fence_ops amd_sched_fence_ops_finished;
static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
{
struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence,
base);
if (f->ops == &amd_sched_fence_ops_scheduled)
return container_of(f, struct amd_sched_fence, scheduled);
if (__f->base.ops == &amd_sched_fence_ops)
return __f;
if (f->ops == &amd_sched_fence_ops_finished)
return container_of(f, struct amd_sched_fence, finished);
return NULL;
}
@@ -148,7 +147,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
struct amd_sched_fence *amd_sched_fence_create(
struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
void amd_sched_fence_signal(struct amd_sched_fence *fence);
void amd_sched_fence_finished(struct amd_sched_fence *fence);
int amd_sched_job_init(struct amd_sched_job *job,
struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity,

View File

@@ -37,36 +37,37 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
if (fence == NULL)
return NULL;
INIT_LIST_HEAD(&fence->scheduled_cb);
fence->owner = owner;
fence->sched = entity->sched;
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&entity->fence_seq);
fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
entity->fence_context, seq);
fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
&fence->lock, entity->fence_context, seq);
fence_init(&fence->finished, &amd_sched_fence_ops_finished,
&fence->lock, entity->fence_context + 1, seq);
return fence;
}
void amd_sched_fence_signal(struct amd_sched_fence *fence)
void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
{
int ret = fence_signal(&fence->base);
int ret = fence_signal(&fence->scheduled);
if (!ret)
FENCE_TRACE(&fence->base, "signaled from irq context\n");
FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
else
FENCE_TRACE(&fence->base, "was already signaled\n");
FENCE_TRACE(&fence->scheduled, "was already signaled\n");
}
void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
void amd_sched_fence_finished(struct amd_sched_fence *fence)
{
struct fence_cb *cur, *tmp;
int ret = fence_signal(&fence->finished);
set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
list_del_init(&cur->node);
cur->func(&s_fence->base, cur);
}
if (!ret)
FENCE_TRACE(&fence->finished, "signaled from irq context\n");
else
FENCE_TRACE(&fence->finished, "was already signaled\n");
}
static const char *amd_sched_fence_get_driver_name(struct fence *fence)
@@ -96,6 +97,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
{
struct fence *f = container_of(rcu, struct fence, rcu);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
kmem_cache_free(sched_fence_slab, fence);
}
@@ -107,16 +109,41 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
static void amd_sched_fence_release(struct fence *f)
static void amd_sched_fence_release_scheduled(struct fence *f)
{
call_rcu(&f->rcu, amd_sched_fence_free);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
call_rcu(&fence->finished.rcu, amd_sched_fence_free);
}
const struct fence_ops amd_sched_fence_ops = {
/**
 * amd_sched_fence_release_finished - drop the reference on the scheduled fence
 *
 * @f: the finished fence being released
 *
 * Release callback for the finished fence: when its refcount drops to
 * zero, drop the reference it holds on the companion scheduled fence.
 * The scheduled fence's release (amd_sched_fence_release_scheduled)
 * then RCU-frees the containing amd_sched_fence.
 */
static void amd_sched_fence_release_finished(struct fence *f)
{
	struct amd_sched_fence *fence = to_amd_sched_fence(f);
	fence_put(&fence->scheduled);
}
const struct fence_ops amd_sched_fence_ops_scheduled = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
.wait = fence_default_wait,
.release = amd_sched_fence_release,
.release = amd_sched_fence_release_scheduled,
};
/*
 * fence_ops for the "finished" fence embedded in struct amd_sched_fence.
 * Identical to the scheduled ops except for .release, which drops the
 * extra reference held on the paired scheduled fence.
 */
const struct fence_ops amd_sched_fence_ops_finished = {
	.get_driver_name = amd_sched_fence_get_driver_name,
	.get_timeline_name = amd_sched_fence_get_timeline_name,
	.enable_signaling = amd_sched_fence_enable_signaling,
	.signaled = NULL,
	.wait = fence_default_wait,
	.release = amd_sched_fence_release_finished,
};