drm/radeon: raise UVD clocks only on demand
That not only saves some power, but also works around issues on older chips, where an idle UVD block kept at higher clocks can cause problems.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 55b51c88c5
parent 4ed108352d
drivers/gpu/drm/radeon/radeon.h
@@ -1143,6 +1143,7 @@ struct radeon_uvd {
 	uint64_t		gpu_addr;
 	atomic_t		handles[RADEON_MAX_UVD_HANDLES];
 	struct drm_file		*filp[RADEON_MAX_UVD_HANDLES];
+	struct delayed_work	idle_work;
 };
 
 int radeon_uvd_init(struct radeon_device *rdev);
@@ -1157,6 +1158,7 @@ void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
 void radeon_uvd_free_handles(struct radeon_device *rdev,
 			     struct drm_file *filp);
 int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
+void radeon_uvd_note_usage(struct radeon_device *rdev);
 
 struct r600_audio {
 	int channels;
drivers/gpu/drm/radeon/radeon_cs.c
@@ -549,6 +549,10 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		r = radeon_cs_handle_lockup(rdev, r);
 		return r;
 	}
+
+	if (parser.ring == R600_RING_TYPE_UVD_INDEX)
+		radeon_uvd_note_usage(rdev);
+
 	r = radeon_cs_ib_chunk(rdev, &parser);
 	if (r) {
 		goto out;
drivers/gpu/drm/radeon/radeon_uvd.c
@@ -36,6 +36,9 @@
 #include "radeon.h"
 #include "r600d.h"
 
+/* 1 second timeout */
+#define UVD_IDLE_TIMEOUT_MS	1000
+
 /* Firmware Names */
 #define FIRMWARE_RV710		"radeon/RV710_uvd.bin"
 #define FIRMWARE_CYPRESS	"radeon/CYPRESS_uvd.bin"
@@ -47,6 +50,8 @@ MODULE_FIRMWARE(FIRMWARE_CYPRESS);
 MODULE_FIRMWARE(FIRMWARE_SUMO);
 MODULE_FIRMWARE(FIRMWARE_TAHITI);
 
+static void radeon_uvd_idle_work_handler(struct work_struct *work);
+
 int radeon_uvd_init(struct radeon_device *rdev)
 {
 	struct platform_device *pdev;
@@ -54,6 +59,8 @@ int radeon_uvd_init(struct radeon_device *rdev)
 	const char *fw_name;
 	int i, r;
 
+	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
+
 	pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
 	r = IS_ERR(pdev);
 	if (r) {
@@ -188,8 +195,6 @@ int radeon_uvd_resume(struct radeon_device *rdev)
 
 	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
 
-	radeon_set_uvd_clocks(rdev, 53300, 40000);
-
 	return 0;
 }
 
@@ -666,3 +671,24 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 
 	return radeon_uvd_send_msg(rdev, ring, bo, fence);
 }
+
+static void radeon_uvd_idle_work_handler(struct work_struct *work)
+{
+	struct radeon_device *rdev =
+		container_of(work, struct radeon_device, uvd.idle_work.work);
+
+	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
+		radeon_set_uvd_clocks(rdev, 0, 0);
+	else
+		schedule_delayed_work(&rdev->uvd.idle_work,
+				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+}
+
+void radeon_uvd_note_usage(struct radeon_device *rdev)
+{
+	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
+	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
+					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+	if (set_clocks)
+		radeon_set_uvd_clocks(rdev, 53300, 40000);
+}
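Read together, the change arms a delayed work item on every UVD submission and only touches the clocks when that work was not already pending; when the timer finally fires with no UVD fences outstanding, the clocks are dropped again. Below is a minimal sketch of the same pattern, with hypothetical my_* names standing in for the radeon-specific pieces; only the workqueue calls mirror the actual patch.

/* Sketch: raise hardware clocks on demand, drop them after an idle timeout. */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define MY_IDLE_TIMEOUT_MS	1000	/* same 1 second window the patch uses */

struct my_dev {
	struct delayed_work idle_work;
	/* ... clock/fence state ... */
};

/* Hypothetical hardware helpers, stubbed out for the sketch. */
static bool my_hw_is_busy(struct my_dev *dev) { return false; }
static void my_hw_set_clocks(struct my_dev *dev, bool high) { }

/* Fires MY_IDLE_TIMEOUT_MS after the last noted usage. */
static void my_idle_work_handler(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, idle_work.work);

	if (!my_hw_is_busy(dev))
		my_hw_set_clocks(dev, false);		/* idle: drop the clocks */
	else
		schedule_delayed_work(&dev->idle_work,	/* still busy: re-check later */
				      msecs_to_jiffies(MY_IDLE_TIMEOUT_MS));
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->idle_work, my_idle_work_handler);
}

/* Called for every submission that uses the block. */
static void my_note_usage(struct my_dev *dev)
{
	/*
	 * cancel_delayed_work_sync() returns false when no idle work was
	 * pending, i.e. the block has been idle long enough for the clocks
	 * to be (or stay) lowered, so they have to be raised again now.
	 */
	bool raise = !cancel_delayed_work_sync(&dev->idle_work);

	raise &= schedule_delayed_work(&dev->idle_work,
				       msecs_to_jiffies(MY_IDLE_TIMEOUT_MS));
	if (raise)
		my_hw_set_clocks(dev, true);		/* busy again: raise the clocks */
}

The interesting bit is the return-value dance in my_note_usage()/radeon_uvd_note_usage(): the clocks are only raised when the idle work was not already pending, so back-to-back submissions simply keep pushing the timeout out without repeatedly reprogramming the clocks.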