drm/amdgpu: move scheduler fence callback into fence v2

And call the process_job callback directly after submitting the job.

v2: split adding error handling into a separate patch.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Author:    Christian König <christian.koenig@amd.com>
Date:      2015-08-31 17:02:52 +02:00
Committer: Alex Deucher
Parent:    27439fcac0
Commit:    258f3f99d5

 2 files changed, 12 insertions(+), 11 deletions(-)
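
Note: the point of the change is that the completion callback node (struct fence_cb) now lives inside the scheduler fence rather than inside the job, so the callback recovers the amd_sched_fence with container_of() and the job itself no longer has to stay around until the hardware fence signals (the diff below now calls sched->ops->process_job() right after run_job()). The following stand-alone sketch is not kernel code; the struct names mirror the diff, but their contents are simplified stand-ins used only to illustrate the container_of() recovery pattern.

/*
 * Minimal user-space sketch of the callback pattern the patch switches to:
 * the fence_cb is embedded in the scheduler fence, and the callback
 * recovers the amd_sched_fence from it via container_of().
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fence { int dummy; };            /* stand-in for struct fence    */
struct fence_cb { int node; };          /* stand-in for struct fence_cb */

struct amd_sched_fence {                /* simplified stand-in          */
        struct fence_cb cb;             /* callback node now lives here */
        const char *owner;
};

/* Callback: recover the scheduler fence from the embedded fence_cb. */
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);

        (void)f;
        printf("hw fence done, signaling scheduler fence of %s\n",
               s_fence->owner);
}

int main(void)
{
        struct amd_sched_fence s_fence = { .owner = "example job" };

        /* In the driver, fence_add_callback() would invoke this when the
         * hardware fence signals; here we call it directly, mirroring the
         * NULL-fence fallback path in the diff. */
        amd_sched_process_job(NULL, &s_fence.cb);
        return 0;
}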

--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

@@ -319,15 +319,13 @@ amd_sched_select_job(struct amd_gpu_scheduler *sched)
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 {
-        struct amd_sched_job *sched_job =
-                container_of(cb, struct amd_sched_job, cb);
-        struct amd_gpu_scheduler *sched;
+        struct amd_sched_fence *s_fence =
+                container_of(cb, struct amd_sched_fence, cb);
+        struct amd_gpu_scheduler *sched = s_fence->scheduler;
 
-        sched = sched_job->sched;
-        amd_sched_fence_signal(sched_job->s_fence);
         atomic_dec(&sched->hw_rq_count);
-        fence_put(&sched_job->s_fence->base);
-        sched->ops->process_job(sched_job);
+        amd_sched_fence_signal(s_fence);
+        fence_put(&s_fence->base);
         wake_up_interruptible(&sched->wake_up_worker);
 }
@@ -341,6 +339,7 @@ static int amd_sched_main(void *param)
         while (!kthread_should_stop()) {
                 struct amd_sched_entity *entity;
+                struct amd_sched_fence *s_fence;
                 struct amd_sched_job *job;
                 struct fence *fence;
@@ -352,19 +351,21 @@ static int amd_sched_main(void *param)
                         continue;
 
                 entity = job->s_entity;
+                s_fence = job->s_fence;
                 atomic_inc(&sched->hw_rq_count);
                 fence = sched->ops->run_job(job);
+                sched->ops->process_job(job);
                 if (fence) {
-                        r = fence_add_callback(fence, &job->cb,
+                        r = fence_add_callback(fence, &s_fence->cb,
                                                amd_sched_process_job);
                         if (r == -ENOENT)
-                                amd_sched_process_job(fence, &job->cb);
+                                amd_sched_process_job(fence, &s_fence->cb);
                         else if (r)
                                 DRM_ERROR("fence add callback failed (%d)\n", r);
                         fence_put(fence);
                 } else {
                         DRM_ERROR("Failed to run job!\n");
-                        amd_sched_process_job(NULL, &job->cb);
+                        amd_sched_process_job(NULL, &s_fence->cb);
                 }
 
                 count = kfifo_out(&entity->job_queue, &job, sizeof(job));

--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

@@ -62,13 +62,13 @@ struct amd_sched_rq {
 struct amd_sched_fence {
         struct fence                    base;
+        struct fence_cb                 cb;
         struct amd_gpu_scheduler        *scheduler;
         spinlock_t                      lock;
         void                            *owner;
 };
 
 struct amd_sched_job {
-        struct fence_cb                 cb;
         struct amd_gpu_scheduler        *sched;
         struct amd_sched_entity         *s_entity;
         struct amd_sched_fence          *s_fence;