
drm/radeon: Make sure CS mutex is held across GPU reset.

This was only the case if the GPU reset was triggered from the CS ioctl,
otherwise other processes could happily enter the CS ioctl and wreak havoc
during the GPU reset.

This is a little complicated because the GPU reset can be triggered from the
CS ioctl, in which case we're already holding the mutex, or from other call
paths, in which case we need to lock the mutex. AFAICT the mutex API doesn't
allow recursive locking or finding out the mutex owner, so we need to handle
this with helper functions which allow recursive locking from the same
process.
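For illustration only, here is a userspace analogue of the pattern described above, using POSIX threads instead of the kernel mutex API. pthread_self() stands in for the kernel's `current`, and the recursive_mutex names are hypothetical, not part of the radeon code (the actual helpers are in the radeon.h hunk below):

#include <pthread.h>

/*
 * Userspace sketch of a mutex allowing recursive locking from the
 * same thread. Names are illustrative; this is not radeon code.
 */
struct recursive_mutex {
	pthread_mutex_t mutex;
	pthread_t owner;
	int has_owner;	/* pthread_t has no "NULL" value, so track validity */
	int level;
};

static void recursive_mutex_init(struct recursive_mutex *m)
{
	pthread_mutex_init(&m->mutex, NULL);
	m->has_owner = 0;
	m->level = 0;
}

static void recursive_mutex_lock(struct recursive_mutex *m)
{
	if (pthread_mutex_trylock(&m->mutex) == 0) {
		/* The mutex was unlocked before, so it's ours now */
		m->owner = pthread_self();
		m->has_owner = 1;
	} else if (!m->has_owner || !pthread_equal(m->owner, pthread_self())) {
		/* Another thread holds the mutex, wait for it */
		pthread_mutex_lock(&m->mutex);
		m->owner = pthread_self();
		m->has_owner = 1;
	}
	/* Otherwise we already hold the mutex: just deepen the nesting */
	m->level++;
}

static void recursive_mutex_unlock(struct recursive_mutex *m)
{
	if (--m->level > 0)
		return;	/* still nested, keep holding the lock */
	m->has_owner = 0;
	pthread_mutex_unlock(&m->mutex);
}

As in the kernel helpers, checking the owner field without holding the mutex is safe only because a thread can observe itself as the owner only if it stored that value itself, while holding the lock.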

Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Author: Michel Dänzer, 2011-11-10 18:57:26 +01:00 (committed by Dave Airlie)
commit 7a1619b97e, parent 471dd2ef37
3 changed files with 62 additions and 12 deletions

drivers/gpu/drm/radeon/radeon.h

@@ -1142,6 +1142,48 @@ struct r600_vram_scratch {
 	u64				gpu_addr;
 };
 
+/*
+ * Mutex which allows recursive locking from the same process.
+ */
+struct radeon_mutex {
+	struct mutex		mutex;
+	struct task_struct	*owner;
+	int			level;
+};
+
+static inline void radeon_mutex_init(struct radeon_mutex *mutex)
+{
+	mutex_init(&mutex->mutex);
+	mutex->owner = NULL;
+	mutex->level = 0;
+}
+
+static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
+{
+	if (mutex_trylock(&mutex->mutex)) {
+		/* The mutex was unlocked before, so it's ours now */
+		mutex->owner = current;
+	} else if (mutex->owner != current) {
+		/* Another process locked the mutex, take it */
+		mutex_lock(&mutex->mutex);
+		mutex->owner = current;
+	}
+	/* Otherwise the mutex was already locked by this process */
+
+	mutex->level++;
+}
+
+static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
+{
+	if (--mutex->level > 0)
+		return;
+
+	mutex->owner = NULL;
+	mutex_unlock(&mutex->mutex);
+}
+
 
 /*
  * Core structure, functions and helpers.
  */
@@ -1197,7 +1239,7 @@ struct radeon_device {
 	struct radeon_gem		gem;
 	struct radeon_pm		pm;
 	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
-	struct mutex			cs_mutex;
+	struct radeon_mutex		cs_mutex;
 	struct radeon_wb		wb;
 	struct radeon_dummy_page	dummy_page;
 	bool				gpu_lockup;

drivers/gpu/drm/radeon/radeon_cs.c

@@ -222,7 +222,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	struct radeon_cs_chunk *ib_chunk;
 	int r;
 
-	mutex_lock(&rdev->cs_mutex);
+	radeon_mutex_lock(&rdev->cs_mutex);
 	/* initialize parser */
 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
 	parser.filp = filp;
@@ -233,14 +233,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
 		radeon_cs_parser_fini(&parser, r);
-		mutex_unlock(&rdev->cs_mutex);
+		radeon_mutex_unlock(&rdev->cs_mutex);
 		return r;
 	}
 	r = radeon_ib_get(rdev, &parser.ib);
 	if (r) {
 		DRM_ERROR("Failed to get ib !\n");
 		radeon_cs_parser_fini(&parser, r);
-		mutex_unlock(&rdev->cs_mutex);
+		radeon_mutex_unlock(&rdev->cs_mutex);
 		return r;
 	}
 	r = radeon_cs_parser_relocs(&parser);
@@ -248,7 +248,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("Failed to parse relocation %d!\n", r);
 		radeon_cs_parser_fini(&parser, r);
-		mutex_unlock(&rdev->cs_mutex);
+		radeon_mutex_unlock(&rdev->cs_mutex);
 		return r;
 	}
 	/* Copy the packet into the IB, the parser will read from the
@@ -260,14 +260,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (r || parser.parser_error) {
 		DRM_ERROR("Invalid command stream !\n");
 		radeon_cs_parser_fini(&parser, r);
-		mutex_unlock(&rdev->cs_mutex);
+		radeon_mutex_unlock(&rdev->cs_mutex);
 		return r;
 	}
 	r = radeon_cs_finish_pages(&parser);
 	if (r) {
 		DRM_ERROR("Invalid command stream !\n");
 		radeon_cs_parser_fini(&parser, r);
-		mutex_unlock(&rdev->cs_mutex);
+		radeon_mutex_unlock(&rdev->cs_mutex);
 		return r;
 	}
 	r = radeon_ib_schedule(rdev, parser.ib);
@@ -275,7 +275,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		DRM_ERROR("Failed to schedule IB !\n");
 	}
 	radeon_cs_parser_fini(&parser, r);
-	mutex_unlock(&rdev->cs_mutex);
+	radeon_mutex_unlock(&rdev->cs_mutex);
 	return r;
 }

drivers/gpu/drm/radeon/radeon_device.c

@@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
-	mutex_init(&rdev->cs_mutex);
+	radeon_mutex_init(&rdev->cs_mutex);
 	mutex_init(&rdev->ib_pool.mutex);
 	mutex_init(&rdev->cp.mutex);
 	mutex_init(&rdev->dc_hw_i2c_mutex);
@@ -955,6 +955,9 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	int r;
 	int resched;
 
+	/* Prevent CS ioctl from interfering */
+	radeon_mutex_lock(&rdev->cs_mutex);
+
 	radeon_save_bios_scratch_regs(rdev);
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -967,10 +970,15 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 		radeon_restore_bios_scratch_regs(rdev);
 		drm_helper_resume_force_mode(rdev->ddev);
 		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-		return 0;
 	}
-	/* bad news, how to tell it to userspace ? */
-	dev_info(rdev->dev, "GPU reset failed\n");
+
+	radeon_mutex_unlock(&rdev->cs_mutex);
+
+	if (r) {
+		/* bad news, how to tell it to userspace ? */
+		dev_info(rdev->dev, "GPU reset failed\n");
+	}
+
 	return r;
 }
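
Taken together, the hunks above allow the following nesting: radeon_cs_ioctl() takes cs_mutex at level 1; if processing the command stream ends up triggering radeon_gpu_reset(), the reset path's radeon_mutex_lock() only bumps the level to 2, and its unlock drops it back to 1, so the ioctl's own unlock performs the real release. A small demo of that behaviour, built on the hypothetical userspace sketch above (fake_gpu_reset() is likewise illustrative):

#include <stdio.h>

static struct recursive_mutex cs_mutex;	/* sketch type from above */

static void fake_gpu_reset(void)
{
	recursive_mutex_lock(&cs_mutex);	/* same thread: level 1 -> 2 */
	printf("reset holding cs_mutex at level %d\n", cs_mutex.level);
	recursive_mutex_unlock(&cs_mutex);	/* level 2 -> 1, still held */
}

int main(void)
{
	recursive_mutex_init(&cs_mutex);
	recursive_mutex_lock(&cs_mutex);	/* "CS ioctl" enters, level 1 */
	fake_gpu_reset();			/* nested reset does not deadlock */
	recursive_mutex_unlock(&cs_mutex);	/* level 0, mutex released */
	return 0;
}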