mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-25 05:34:00 +08:00
drm/msm: fix locking inconsistency for gpu->hw_init()
Most, but not all, paths were calling this with struct_mutex held. The fast-path in msm_gem_get_iova() (plus some sub-code-paths that only run the first time) was masking this issue. So let's just always hold struct_mutex for hw_init(). And sprinkle some WARN_ON()'s and might_lock() to avoid this sort of problem in the future. Signed-off-by: Rob Clark <robdclark@gmail.com>
This commit is contained in:
parent
42a105e9cf
commit
cb1e38181a
@ -297,31 +297,28 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
|
||||
struct drm_gem_object *bo;
|
||||
void *ptr;
|
||||
|
||||
mutex_lock(&drm->struct_mutex);
|
||||
bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED);
|
||||
mutex_unlock(&drm->struct_mutex);
|
||||
|
||||
if (IS_ERR(bo))
|
||||
return bo;
|
||||
|
||||
ptr = msm_gem_get_vaddr(bo);
|
||||
ptr = msm_gem_get_vaddr_locked(bo);
|
||||
if (!ptr) {
|
||||
drm_gem_object_unreference_unlocked(bo);
|
||||
drm_gem_object_unreference(bo);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
if (iova) {
|
||||
int ret = msm_gem_get_iova(bo, gpu->id, iova);
|
||||
int ret = msm_gem_get_iova_locked(bo, gpu->id, iova);
|
||||
|
||||
if (ret) {
|
||||
drm_gem_object_unreference_unlocked(bo);
|
||||
drm_gem_object_unreference(bo);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(ptr, &fw->data[4], fw->size - 4);
|
||||
|
||||
msm_gem_put_vaddr(bo);
|
||||
msm_gem_put_vaddr_locked(bo);
|
||||
return bo;
|
||||
}
|
||||
|
||||
|
@ -294,17 +294,14 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
|
||||
*/
|
||||
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
|
||||
|
||||
mutex_lock(&drm->struct_mutex);
|
||||
a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
|
||||
mutex_unlock(&drm->struct_mutex);
|
||||
|
||||
if (IS_ERR(a5xx_gpu->gpmu_bo))
|
||||
goto err;
|
||||
|
||||
if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
|
||||
if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
|
||||
goto err;
|
||||
|
||||
ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
|
||||
ptr = msm_gem_get_vaddr_locked(a5xx_gpu->gpmu_bo);
|
||||
if (!ptr)
|
||||
goto err;
|
||||
|
||||
@ -323,7 +320,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
|
||||
cmds_size -= _size;
|
||||
}
|
||||
|
||||
msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
|
||||
msm_gem_put_vaddr_locked(a5xx_gpu->gpmu_bo);
|
||||
a5xx_gpu->gpmu_dwords = dwords;
|
||||
|
||||
goto out;
|
||||
@ -332,7 +329,7 @@ err:
|
||||
if (a5xx_gpu->gpmu_iova)
|
||||
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
|
||||
if (a5xx_gpu->gpmu_bo)
|
||||
drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
|
||||
drm_gem_object_unreference(a5xx_gpu->gpmu_bo);
|
||||
|
||||
a5xx_gpu->gpmu_bo = NULL;
|
||||
a5xx_gpu->gpmu_iova = 0;
|
||||
|
@ -159,7 +159,9 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
|
||||
int ret;
|
||||
|
||||
pm_runtime_get_sync(&pdev->dev);
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
ret = msm_gpu_hw_init(gpu);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
pm_runtime_put_sync(&pdev->dev);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
|
||||
|
@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
|
||||
|
||||
DBG("%s", gpu->name);
|
||||
|
||||
ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
|
||||
ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
|
||||
if (ret) {
|
||||
gpu->rb_iova = 0;
|
||||
dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
|
||||
|
@ -314,6 +314,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
int ret = 0;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
|
||||
|
||||
if (!msm_obj->domain[id].iova) {
|
||||
struct msm_drm_private *priv = obj->dev->dev_private;
|
||||
struct page **pages = get_pages(obj);
|
||||
@ -345,6 +347,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
|
||||
* bo is deleted:
|
||||
*/
|
||||
if (msm_obj->domain[id].iova) {
|
||||
might_lock(&obj->dev->struct_mutex);
|
||||
*iova = msm_obj->domain[id].iova;
|
||||
return 0;
|
||||
}
|
||||
|
@ -203,6 +203,8 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
|
||||
{
|
||||
int ret;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
|
||||
|
||||
if (!gpu->needs_hw_init)
|
||||
return 0;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user