Short summary of fixes pull:

panthor:
- Set FOP_UNSIGNED_OFFSET in fops instance
- Acquire lock in panthor_vm_prepare_map_op_ctx()
- Avoid uninitialized variable in tick_ctx_cleanup()
- Do not block scheduler queue if work is pending
- Do not add write fences to the shared BOs

scheduler:
- Fix locking in drm_sched_entity_modify_sched()
- Fix pointer deref if entity queue changes

-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEEchf7rIzpz2NEoWjlaA3BHVMLeiMFAmb9YdQACgkQaA3BHVML
eiPIDwf/b5evab0RNOW9E62Be58XYi2nxD+Wps9AH9DOZrM+HBGCMmtKchV5zUfh
HTtchnCdVoMhUoPYd9X3hTEtjhWfglX/ghCcxphdURpiysUiFRU1am/SBpxgO1bh
eRgqMD1+2r9NObsyqcpvtyfH2a9JH+3+4cv2UBA4kx6qQBsCbfLHcvjRaZJGyRnj
XHKcHTP3CjgXVNKWAvkslESonmEWLLdDA1Pt4tpeXyLtr7aAui8oeE7GqbQHb5GL
qo1dAQMYLxCTu3PYprpZ87FxsBs4FbdhWZnX2X8WOJZawdK9afVxVhhxU4mnlt0M
z3LBes2bwFab9B4a6je8Efu+5r+7tQ==
=iEH0
-----END PGP SIGNATURE-----

Merge tag 'drm-misc-fixes-2024-10-02' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20241002151528.GA300287@linux.fritz.box
commit 475be51444
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -1383,6 +1383,7 @@ static const struct file_operations panthor_drm_driver_fops = {
 	.read = drm_read,
 	.llseek = noop_llseek,
 	.mmap = panthor_mmap,
+	.fop_flags = FOP_UNSIGNED_OFFSET,
 };
 
 #ifdef CONFIG_DEBUG_FS
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1251,9 +1251,17 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 		goto err_cleanup;
 	}
 
+	/* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
+	 * pre-allocated BO if the <BO,VM> association exists. Given we
+	 * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
+	 * be called immediately, and we have to hold the VM resv lock when
+	 * calling this function.
+	 */
+	dma_resv_lock(panthor_vm_resv(vm), NULL);
 	mutex_lock(&bo->gpuva_list_lock);
 	op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
 	mutex_unlock(&bo->gpuva_list_lock);
+	dma_resv_unlock(panthor_vm_resv(vm));
 
 	/* If the a vm_bo for this <VM,BO> combination exists, it already
 	 * retains a pin ref, and we can release the one we took earlier.
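The pattern worth noting in the hunk above: drm_gpuvm_bo_obtain_prealloc() may drop the last reference on the pre-allocated vm_bo, and the resulting drm_gpuvm_bo_destroy() must run with the VM reservation lock held, so the lock is now taken around the call. A minimal userspace analogue of "a put that can free must run under the container lock" — pthreads, all names hypothetical, not panthor code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;		/* protected by table_lock in this sketch */
	struct obj *next;	/* linkage in the table below */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *table;	/* list of live objects, protected by table_lock */

/* Unlink and free; the caller must hold table_lock. */
static void obj_release_locked(struct obj *o)
{
	for (struct obj **p = &table; *p; p = &(*p)->next) {
		if (*p == o) {
			*p = o->next;
			break;
		}
	}
	free(o);
}

/* Dropping the last ref frees the object and touches the table, so the
 * caller must hold table_lock — just as the patch holds the VM resv lock
 * around drm_gpuvm_bo_obtain_prealloc(). */
static void obj_put_locked(struct obj *o)
{
	if (--o->refcount == 0)
		obj_release_locked(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refcount = 1;
	o->next = table;
	table = o;

	pthread_mutex_lock(&table_lock);
	obj_put_locked(o);	/* last ref: unlink + free under the lock */
	pthread_mutex_unlock(&table_lock);

	printf("table empty: %s\n", table ? "no" : "yes");
	return 0;
}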
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -1103,7 +1103,13 @@ cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs
 			list_move_tail(&group->wait_node,
 				       &group->ptdev->scheduler->groups.waiting);
 		}
-		group->blocked_queues |= BIT(cs_id);
+
+		/* The queue is only blocked if there's no deferred operation
+		 * pending, which can be checked through the scoreboard status.
+		 */
+		if (!cs_iface->output->status_scoreboards)
+			group->blocked_queues |= BIT(cs_id);
+
 		queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
 		queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
 		status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
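The logic change is small but easy to misread in diff form: the queue is now only flagged as blocked when the firmware scoreboard reports nothing in flight. A tiny standalone C sketch of that gating (the struct and names are stand-ins, not the real firmware interface):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the firmware status field read in the hunk above. */
struct cs_status {
	uint32_t status_scoreboards;	/* non-zero: deferred ops in flight */
};

/* Only mark the queue blocked when nothing is pending; a queue with a
 * deferred operation still in flight will make progress on its own. */
static uint32_t update_blocked_mask(uint32_t blocked, uint32_t cs_id,
				    const struct cs_status *st)
{
	if (!st->status_scoreboards)
		blocked |= UINT32_C(1) << cs_id;
	return blocked;
}

int main(void)
{
	struct cs_status idle = { .status_scoreboards = 0 };
	struct cs_status busy = { .status_scoreboards = 0x3 };

	printf("idle: 0x%x\n", update_blocked_mask(0, 2, &idle));	/* 0x4 */
	printf("busy: 0x%x\n", update_blocked_mask(0, 2, &busy));	/* 0x0 */
	return 0;
}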
@@ -2046,6 +2052,7 @@ static void
 tick_ctx_cleanup(struct panthor_scheduler *sched,
 		 struct panthor_sched_tick_ctx *ctx)
 {
+	struct panthor_device *ptdev = sched->ptdev;
 	struct panthor_group *group, *tmp;
 	u32 i;
 
@@ -2054,7 +2061,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
 		/* If everything went fine, we should only have groups
 		 * to be terminated in the old_groups lists.
 		 */
-		drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask &&
+		drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
 			    group_can_run(group));
 
 		if (!group_can_run(group)) {
@@ -2077,7 +2084,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
 		/* If everything went fine, the groups to schedule lists should
 		 * be empty.
 		 */
-		drm_WARN_ON(&group->ptdev->base,
+		drm_WARN_ON(&ptdev->base,
 			    !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
 
 		list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
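This is the "uninitialized variable" item from the summary: the second drm_WARN_ON runs before the list_for_each_entry_safe() loop that assigns the group cursor, so group->ptdev dereferenced an uninitialized pointer. The fix hoists ptdev from the always-valid sched parameter and uses it at both warning sites. A compact userspace sketch of the bug class and the fix shape (hypothetical names, not the real structures):

#include <stdio.h>

struct device { const char *name; };

struct sched {
	struct device *dev;
	int ngroups;
};

/* Buggy shape: a cursor-style variable is only assigned inside a loop,
 * then used in a check that can run when the loop body never did:
 *
 *	struct group *group;		// never assigned if the list is empty
 *	...
 *	warn(group->ptdev, ...);	// uninitialized read
 *
 * Fixed shape, as in the patch: derive the pointer from a parameter
 * that is always valid. */
static void cleanup(struct sched *s)
{
	struct device *dev = s->dev;	/* valid even with zero groups */

	for (int i = 0; i < s->ngroups; i++)
		printf("terminating group %d on %s\n", i, dev->name);

	printf("warn check on %s\n", dev->name);	/* always safe */
}

int main(void)
{
	struct device d = { .name = "gpu0" };
	struct sched s = { .dev = &d, .ngroups = 0 };

	cleanup(&s);
	return 0;
}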
@@ -3436,13 +3443,8 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched
 {
 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
 
-	/* Still not sure why we want USAGE_WRITE for external objects, since I
-	 * was assuming this would be handled through explicit syncs being imported
-	 * to external BOs with DMA_BUF_IOCTL_IMPORT_SYNC_FILE, but other drivers
-	 * seem to pass DMA_RESV_USAGE_WRITE, so there must be a good reason.
-	 */
 	panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
-				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
+				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
 }
 
 void panthor_sched_unplug(struct panthor_device *ptdev)
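With the job fence attached as DMA_RESV_USAGE_BOOKKEEP on shared BOs, it no longer participates in implicit sync, so cross-process ordering is expected to come from explicit sync, e.g. the dma-buf sync-file ioctls the removed comment mentions. A hedged userspace sketch: the structs and ioctls are the real uapi from <linux/dma-buf.h>, but dmabuf_fd, fence_fd, and the surrounding setup are assumed:

#include <linux/dma-buf.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Pull the BO's current write fences out as a sync_file fd, then feed a
 * fence from elsewhere back in as a write fence. Assumes dmabuf_fd is an
 * exported BO and fence_fd a valid sync_file. */
static int cross_device_sync(int dmabuf_fd, int fence_fd)
{
	struct dma_buf_export_sync_file exp = { .flags = DMA_BUF_SYNC_WRITE };
	struct dma_buf_import_sync_file imp = {
		.flags = DMA_BUF_SYNC_WRITE,
		.fd = fence_fd,
	};

	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &exp) < 0)
		return -1;

	printf("exported write fences as sync_file fd %d\n", exp.fd);
	close(exp.fd);	/* a real consumer would wait on or merge it */

	return ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &imp);
}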
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -133,8 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
 {
 	WARN_ON(!num_sched_list || !sched_list);
 
+	spin_lock(&entity->rq_lock);
 	entity->sched_list = sched_list;
 	entity->num_sched_list = num_sched_list;
+	spin_unlock(&entity->rq_lock);
 }
 EXPORT_SYMBOL(drm_sched_entity_modify_sched);
 
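The locking fix above makes sched_list and num_sched_list change atomically with respect to readers holding rq_lock: without it, a reader could observe the new list with the old length or vice versa. The same pattern in a self-contained pthreads sketch (hypothetical names, not the scheduler's actual types):

#include <pthread.h>
#include <stddef.h>

struct entity {
	pthread_mutex_t lock;
	int *sched_list;	/* paired fields: pointer ... */
	size_t num_sched;	/* ... and its length */
};

/* Writer: update both fields under the lock so they always match. */
static void entity_modify(struct entity *e, int *list, size_t n)
{
	pthread_mutex_lock(&e->lock);
	e->sched_list = list;
	e->num_sched = n;
	pthread_mutex_unlock(&e->lock);
}

/* Reader: snapshot both fields under the same lock before using them. */
static int entity_first(struct entity *e)
{
	pthread_mutex_lock(&e->lock);
	int v = e->num_sched ? e->sched_list[0] : -1;
	pthread_mutex_unlock(&e->lock);
	return v;
}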
@@ -597,6 +599,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 
 	/* first job wakes up scheduler */
 	if (first) {
+		struct drm_gpu_scheduler *sched;
+		struct drm_sched_rq *rq;
+
 		/* Add the entity to the run queue */
 		spin_lock(&entity->rq_lock);
 		if (entity->stopped) {
@@ -606,13 +611,16 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 			return;
 		}
 
-		drm_sched_rq_add_entity(entity->rq, entity);
+		rq = entity->rq;
+		sched = rq->sched;
+
+		drm_sched_rq_add_entity(rq, entity);
 		spin_unlock(&entity->rq_lock);
 
 		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
 			drm_sched_rq_update_fifo(entity, submit_ts);
 
-		drm_sched_wakeup(entity->rq->sched);
+		drm_sched_wakeup(sched);
 	}
 }
 EXPORT_SYMBOL(drm_sched_entity_push_job);
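This is the "pointer deref if entity queue changes" fix: once rq_lock is dropped, drm_sched_entity_modify_sched() (now itself taking rq_lock, per the earlier hunk) may repoint entity->rq, so push_job caches rq and sched under the lock and only uses the cached values afterwards. A minimal runnable sketch of the snapshot-under-lock pattern (hypothetical names):

#include <pthread.h>
#include <stdio.h>

struct runqueue { const char *name; };

struct entity {
	pthread_mutex_t rq_lock;
	struct runqueue *rq;	/* may be repointed concurrently */
};

static void push_job(struct entity *e)
{
	pthread_mutex_lock(&e->rq_lock);
	struct runqueue *rq = e->rq;	/* snapshot under the lock */
	pthread_mutex_unlock(&e->rq_lock);

	/* Safe: uses the snapshot, not e->rq, which a concurrent
	 * modify_sched()-style writer may have changed by now. */
	printf("waking %s\n", rq->name);
}

int main(void)
{
	struct runqueue rq0 = { "rq0" };
	struct entity e = { PTHREAD_MUTEX_INITIALIZER, &rq0 };

	push_job(&e);
	return 0;
}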