Revert "drm/amdgpu: return new seq_no for amd_sched_push_job"
This reverts commit d1d33da8eb86b8ca41dd9ed95738030df5267b95.

Reviewed-by: Christian König <christian.koenig@amd.com>

Conflicts:
	drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent 47f38501f1
commit 80de5913cf
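The hunks below (which, judging by the function names and the Conflicts list, touch amdgpu_cs.c, amdgpu_sched.c, amdgpu_vm.c and the shared GPU scheduler) move virtual-sequence-number assignment back to the callers: each call site re-acquires v_seq with atomic64_inc_return() on the entity's last_queued_v_seq, stores it in the IB, and only then calls amd_sched_push_job(), which once again returns a plain status code rather than the sequence number. A minimal user-space sketch of that restored pattern, using stand-in types rather than the real kernel structures:

/*
 * Hypothetical user-space model of the submission pattern this revert
 * restores; the struct and function names only mirror the diff, they
 * are not the kernel definitions.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct context_entity {
	atomic_uint_fast64_t last_queued_v_seq;	/* models c_entity.last_queued_v_seq */
};

struct ib {
	uint64_t sequence;			/* models amdgpu_ib.sequence */
};

/* Restored contract: report success/failure, do not return the seq_no. */
static int push_job(struct context_entity *e, struct ib *job)
{
	(void)e;	/* real code would enqueue the job and wake the scheduler */
	(void)job;
	return 0;
}

int main(void)
{
	struct context_entity entity;
	struct ib job;
	uint64_t v_seq;

	atomic_init(&entity.last_queued_v_seq, 0);

	/* Caller-side sequence assignment, as in the restored code paths:
	 * fetch-add returns the old value, so +1 models atomic64_inc_return(). */
	v_seq = atomic_fetch_add(&entity.last_queued_v_seq, 1) + 1;
	job.sequence = v_seq;

	if (push_job(&entity, &job))
		fprintf(stderr, "push_job failed\n");

	printf("queued job with v_seq %llu\n", (unsigned long long)job.sequence);
	return 0;
}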
@@ -901,6 +901,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (amdgpu_enable_scheduler && parser->num_ibs) {
 		struct amdgpu_ring * ring =
 			amdgpu_cs_parser_get_ring(adev, parser);
+		parser->ibs[parser->num_ibs - 1].sequence = atomic64_inc_return(
+			&parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
 		if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
 			r = amdgpu_cs_parser_prepare_job(parser);
 			if (r)
@@ -910,8 +912,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		parser->ring = ring;
 		parser->run_job = amdgpu_cs_parser_run_job;
 		parser->free_job = amdgpu_cs_parser_free_job;
-		parser->ibs[parser->num_ibs - 1].sequence =
-			amd_sched_push_job(ring->scheduler,
+		amd_sched_push_job(ring->scheduler,
 				   &parser->ctx->rings[ring->idx].c_entity,
 				   parser);
 		cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
@@ -121,6 +121,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 {
 	int r = 0;
 	if (amdgpu_enable_scheduler) {
+		uint64_t v_seq;
 		struct amdgpu_cs_parser *sched_job =
 			amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
 						ibs, 1);
@@ -128,12 +129,16 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 			return -ENOMEM;
 		}
 		sched_job->free_job = free_job;
-		ibs[num_ibs - 1].sequence = amd_sched_push_job(ring->scheduler,
+		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+		ibs[num_ibs - 1].sequence = v_seq;
+		amd_sched_push_job(ring->scheduler,
 				   &adev->kernel_ctx.rings[ring->idx].c_entity,
 				   sched_job);
 		r = amd_sched_wait_emit(
 				&adev->kernel_ctx.rings[ring->idx].c_entity,
-				ibs[num_ibs - 1].sequence, false, -1);
+				v_seq,
+				false,
+				-1);
 		if (r)
 			WARN(true, "emit timeout\n");
 	} else
@@ -371,6 +371,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 
 	if (amdgpu_enable_scheduler) {
 		int r;
+		uint64_t v_seq;
 		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
 						    &adev->kernel_ctx, ib, 1);
 		if(!sched_job)
@@ -378,11 +379,15 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		sched_job->job_param.vm.bo = bo;
 		sched_job->run_job = amdgpu_vm_run_job;
 		sched_job->free_job = amdgpu_vm_free_job;
-		ib->sequence = amd_sched_push_job(ring->scheduler,
+		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+		ib->sequence = v_seq;
+		amd_sched_push_job(ring->scheduler,
 				   &adev->kernel_ctx.rings[ring->idx].c_entity,
 				   sched_job);
 		r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
-					ib->sequence, false, -1);
+					v_seq,
+					false,
+					-1);
 		if (r)
 			DRM_ERROR("emit timeout\n");
 
@@ -516,6 +521,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
 	if (amdgpu_enable_scheduler) {
 		int r;
+		uint64_t v_seq;
 		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
 						    &adev->kernel_ctx,
 						    ib, 1);
@@ -524,11 +530,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		sched_job->job_param.vm.bo = pd;
 		sched_job->run_job = amdgpu_vm_run_job;
 		sched_job->free_job = amdgpu_vm_free_job;
-		ib->sequence = amd_sched_push_job(ring->scheduler,
+		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+		ib->sequence = v_seq;
+		amd_sched_push_job(ring->scheduler,
 				   &adev->kernel_ctx.rings[ring->idx].c_entity,
 				   sched_job);
 		r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
-					ib->sequence, false, -1);
+					v_seq,
+					false,
+					-1);
 		if (r)
 			DRM_ERROR("emit timeout\n");
 	} else {
@@ -862,6 +872,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 	if (amdgpu_enable_scheduler) {
 		int r;
+		uint64_t v_seq;
 		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
 						    &adev->kernel_ctx, ib, 1);
 		if(!sched_job)
@@ -872,11 +883,15 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		sched_job->job_param.vm_mapping.fence = fence;
 		sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
 		sched_job->free_job = amdgpu_vm_free_job;
-		ib->sequence = amd_sched_push_job(ring->scheduler,
+		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+		ib->sequence = v_seq;
+		amd_sched_push_job(ring->scheduler,
 				   &adev->kernel_ctx.rings[ring->idx].c_entity,
 				   sched_job);
 		r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
-					ib->sequence, false, -1);
+					v_seq,
+					false,
+					-1);
 		if (r)
 			DRM_ERROR("emit timeout\n");
 	} else {
@@ -289,9 +289,12 @@ int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
  * @sched	The pointer to the scheduler
  * @c_entity	The pointer to amd_context_entity
  * @job		The pointer to job required to submit
- * return the virtual sequence number
+ * return 0 if succeed. -1 if failed.
+ *        -2 indicate queue is full for this client, client should wait untill
+ *	     scheduler consum some queued command.
+ *	  -1 other fail.
 */
-uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
+int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 		       struct amd_context_entity *c_entity,
 		       void *job)
 {
@@ -305,8 +308,7 @@ uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
 	}
 
 	wake_up_interruptible(&sched->wait_queue);
-	return atomic64_inc_return(&c_entity->last_queued_v_seq);
-
+	return 0;
 }
 
 /**
@@ -124,7 +124,7 @@ struct amd_gpu_scheduler *amd_sched_create(void *device,
 
 int amd_sched_destroy(struct amd_gpu_scheduler *sched);
 
-uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
+int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 		       struct amd_context_entity *c_entity,
 		       void *job);
 
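For contrast, the contract being reverted away from had amd_sched_push_job() itself bump last_queued_v_seq and hand the new virtual sequence number back to the caller, as the removed return atomic64_inc_return(...) line above shows, so call sites wrote ib->sequence = amd_sched_push_job(...). A correspondingly minimal sketch of that earlier shape, again with hypothetical user-space stand-ins rather than the kernel types:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct context_entity {
	atomic_uint_fast64_t last_queued_v_seq;
};

/*
 * Pre-revert shape: the push helper increments the counter and returns
 * the new virtual sequence number; the caller simply stores the result.
 * Actual job queueing is elided in this illustration.
 */
static uint64_t push_job_returning_seq(struct context_entity *e)
{
	return atomic_fetch_add(&e->last_queued_v_seq, 1) + 1;
}

int main(void)
{
	struct context_entity entity;
	uint64_t seq;

	atomic_init(&entity.last_queued_v_seq, 0);
	seq = push_job_returning_seq(&entity);	/* ib->sequence = push_job(...) analogue */

	printf("job queued as v_seq %llu\n", (unsigned long long)seq);
	return 0;
}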