drm/amdgpu: use amd_sched_job in its backend ops
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
commit 953e8fd4e7
parent 6f0e54a964
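For orientation before the hunks: this patch changes the scheduler's backend ops to take a typed struct amd_sched_job instead of a raw void pointer, and moves allocation of that wrapper out of the scheduler thread into amd_sched_push_job(). The following is condensed from the gpu_scheduler.h hunks below; the comments are added here for illustration:

	struct amd_sched_job {
		struct list_head		list;
		struct fence_cb			cb;	/* fires when the job's HW fence signals */
		struct amd_gpu_scheduler	*sched;
		struct amd_sched_entity		*s_entity;
		void				*data;	/* driver-private job, e.g. amdgpu's amdgpu_cs_parser */
	};

	struct amd_sched_backend_ops {
		int (*prepare_job)(struct amd_gpu_scheduler *sched,
				   struct amd_sched_entity *c_entity,
				   struct amd_sched_job *job);
		struct fence *(*run_job)(struct amd_gpu_scheduler *sched,
					 struct amd_sched_entity *c_entity,
					 struct amd_sched_job *job);
		void (*process_job)(struct amd_gpu_scheduler *sched,
				    struct amd_sched_job *job);
	};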
@@ -29,10 +29,16 @@
 static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
				    struct amd_sched_entity *entity,
-				    void *job)
+				    struct amd_sched_job *job)
 {
 	int r = 0;
-	struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;
+	struct amdgpu_cs_parser *sched_job;
+	if (!job || !job->data) {
+		DRM_ERROR("job is null\n");
+		return -EINVAL;
+	}
 
+	sched_job = (struct amdgpu_cs_parser *)job->data;
 	if (sched_job->prepare_job) {
 		r = sched_job->prepare_job(sched_job);
 		if (r) {
@@ -51,11 +57,11 @@ static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
 	struct amdgpu_cs_parser *sched_job;
 	struct amdgpu_fence *fence;
 
-	if (!job || !job->job) {
+	if (!job || !job->data) {
 		DRM_ERROR("job is null\n");
 		return NULL;
 	}
-	sched_job = (struct amdgpu_cs_parser *)job->job;
+	sched_job = (struct amdgpu_cs_parser *)job->data;
 	mutex_lock(&sched_job->job_lock);
 	r = amdgpu_ib_schedule(sched_job->adev,
			       sched_job->num_ibs,
@@ -83,22 +89,16 @@ err:
 	return NULL;
 }
 
-static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
+static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched,
+				     struct amd_sched_job *job)
 {
-	struct amdgpu_cs_parser *sched_job = NULL;
-	struct amdgpu_fence *fence = NULL;
-	struct amdgpu_ring *ring = NULL;
-	struct amdgpu_device *adev = NULL;
+	struct amdgpu_cs_parser *sched_job;
 
-	if (!job)
+	if (!job || !job->data) {
+		DRM_ERROR("job is null\n");
 		return;
-	sched_job = (struct amdgpu_cs_parser *)job;
-	fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
-	if (!fence)
-		return;
-	ring = fence->ring;
-	adev = ring->adev;
-
+	}
+	sched_job = (struct amdgpu_cs_parser *)job->data;
 	schedule_work(&sched_job->job_work);
 }
 
@@ -291,8 +291,15 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 */
 int amd_sched_push_job(struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *c_entity,
-		       void *job)
+		       void *data)
 {
+	struct amd_sched_job *job = kzalloc(sizeof(struct amd_sched_job),
+					    GFP_KERNEL);
+	if (!job)
+		return -ENOMEM;
+	job->sched = sched;
+	job->s_entity = c_entity;
+	job->data = data;
 	while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
				   &c_entity->queue_lock) != sizeof(void *)) {
		/**
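A caller-side sketch of the new contract, for illustration only: the submitter hands over just its private data, and amd_sched_push_job() now owns allocation of the wrapper. Here "parser" is a stand-in for the driver-private job (amdgpu casts job->data to struct amdgpu_cs_parser * in the hunks above):

	/* sketch, assuming sched, c_entity and parser are already set up */
	int r = amd_sched_push_job(sched, c_entity, parser);
	if (r)
		return r;	/* -ENOMEM: the amd_sched_job wrapper could not be allocated */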
@@ -366,7 +373,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	atomic64_dec(&sched->hw_rq_count);
 	spin_unlock_irqrestore(&sched->queue_lock, flags);
 
-	sched->ops->process_job(sched, sched_job->job);
+	sched->ops->process_job(sched, sched_job);
 	kfree(sched_job);
 	wake_up_interruptible(&sched->wait_queue);
 }
@@ -374,7 +381,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 static int amd_sched_main(void *param)
 {
 	int r;
-	void *job;
+	struct amd_sched_job *job;
 	struct sched_param sparam = {.sched_priority = 1};
 	struct amd_sched_entity *c_entity = NULL;
 	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
@@ -382,7 +389,6 @@ static int amd_sched_main(void *param)
 	sched_setscheduler(current, SCHED_FIFO, &sparam);
 
 	while (!kthread_should_stop()) {
-		struct amd_sched_job *sched_job = NULL;
		struct fence *fence;
 
		wait_event_interruptible(sched->wait_queue,
@@ -394,26 +400,18 @@ static int amd_sched_main(void *param)
		r = sched->ops->prepare_job(sched, c_entity, job);
		if (!r) {
			unsigned long flags;
-			sched_job = kzalloc(sizeof(struct amd_sched_job),
-					    GFP_KERNEL);
-			if (!sched_job) {
-				WARN(true, "No memory to allocate\n");
-				continue;
-			}
-			sched_job->job = job;
-			sched_job->sched = sched;
			spin_lock_irqsave(&sched->queue_lock, flags);
-			list_add_tail(&sched_job->list, &sched->active_hw_rq);
+			list_add_tail(&job->list, &sched->active_hw_rq);
			atomic64_inc(&sched->hw_rq_count);
			spin_unlock_irqrestore(&sched->queue_lock, flags);
		}
		mutex_lock(&sched->sched_lock);
-		fence = sched->ops->run_job(sched, c_entity, sched_job);
+		fence = sched->ops->run_job(sched, c_entity, job);
		if (fence) {
-			r = fence_add_callback(fence, &sched_job->cb,
+			r = fence_add_callback(fence, &job->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
-				amd_sched_process_job(fence, &sched_job->cb);
+				amd_sched_process_job(fence, &job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
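Pieced together from the hunks above, the wrapper's lifetime now reads as follows (a summary sketch, not verbatim kernel code):

	/*
	 * amd_sched_push_job():     kzalloc() the amd_sched_job, fill in
	 *                           sched, s_entity and data, then push it
	 *                           into the entity's job_queue.
	 * amd_sched_main():         pop a job, prepare_job(), run_job(),
	 *                           then fence_add_callback(fence, &job->cb,
	 *                           amd_sched_process_job).
	 * amd_sched_process_job():  when the fence signals, call
	 *                           sched->ops->process_job(sched, sched_job)
	 *                           and kfree(sched_job).
	 */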
@@ -76,7 +76,8 @@ struct amd_sched_job {
	struct list_head list;
	struct fence_cb cb;
	struct amd_gpu_scheduler *sched;
-	void *job;
+	struct amd_sched_entity *s_entity;
+	void *data;
 };
 
 /**
@@ -86,11 +87,12 @@ struct amd_sched_job {
 struct amd_sched_backend_ops {
	int (*prepare_job)(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *c_entity,
-			   void *job);
+			   struct amd_sched_job *job);
	struct fence *(*run_job)(struct amd_gpu_scheduler *sched,
				 struct amd_sched_entity *c_entity,
				 struct amd_sched_job *job);
-	void (*process_job)(struct amd_gpu_scheduler *sched, void *job);
+	void (*process_job)(struct amd_gpu_scheduler *sched,
+			    struct amd_sched_job *job);
 };
 
 /**
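For illustration, a backend now wires its table up against these signatures as below. The amdgpu_sched_* handlers are the functions changed in the first three hunks; the table declaration itself is not part of this diff, so treat it as a sketch:

	static struct amd_sched_backend_ops amdgpu_sched_ops = {
		.prepare_job = amdgpu_sched_prepare_job,	/* validates job->data before submit */
		.run_job = amdgpu_sched_run_job,		/* schedules the IBs, returns the HW fence */
		.process_job = amdgpu_sched_process_job,	/* completion work once the fence signals */
	};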
@@ -120,12 +122,11 @@ struct amd_gpu_scheduler *amd_sched_create(void *device,
				uint32_t granularity,
				uint32_t preemption,
				uint32_t hw_submission);
 
 int amd_sched_destroy(struct amd_gpu_scheduler *sched);
 
 int amd_sched_push_job(struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *c_entity,
-		       void *job);
+		       void *data);
 
 int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
			uint64_t seq,