
Merge branch 'drm-next-4.7' of git://people.freedesktop.org/~agd5f/linux into drm-next

This is the first big radeon/amdgpu pull request for 4.7.  Highlights:
    - Polaris support in amdgpu
      Current display stack is on par with other ASICs; DAL is required for advanced features
      Power management support
      Support for GFX, Compute, SDMA, UVD, VCE
    - VCE and UVD init/fini cleanup in radeon
    - GPUVM improvements
    - Scheduler improvements
    - Clockgating improvements
    - Powerplay improvements
    - TTM changes to support a driver-specific LRU update mechanism
    - Radeon support for new Mesa features
    - ASYNC pageflip support for radeon
    - Lots of bug fixes and code cleanups

* 'drm-next-4.7' of git://people.freedesktop.org/~agd5f/linux: (180 commits)
  drm/amdgpu: Replace rcu_assign_pointer() with RCU_INIT_POINTER()
  drm/amdgpu: use drm_mode_vrefresh() rather than mode->vrefresh
  drm/amdgpu/uvd6: add bypass support for fiji (v3)
  drm/amdgpu/fiji: set UVD CG state when enabling UVD DPM (v2)
  drm/powerplay: add missing clockgating callback for tonga
  drm/amdgpu: Constify some tables
  drm/amd/powerplay: Delete dead struct declaration
  drm/amd/powerplay/hwmgr: don't add invalid voltage
  drm/amd/powerplay/hwmgr: prevent VDDC from exceeding 2V
  MAINTAINERS: Remove unneded wildcard for the Radeon/AMDGPU drivers
  drm/radeon: add cayman VM support for append packet.
  drm/amd/amdgpu: Add debugfs entries for smc/didt/pcie
  drm/amd/amdgpu: Drop print_status callbacks.
  drm/amd/powerplay: revise reading/writing pptable on Polaris10
  drm/amd/powerplay: revise reading/writing pptable on Tonga
  drm/amd/powerplay: revise reading/writing pptable on Fiji
  drm/amd/powerplay: revise caching the soft pptable and add it's size
  drm/amd/powerplay: add dpm force multiple levels on cz/tonga/fiji/polaris (v2)
  drm/amd/powerplay: fix fan speed percent setting error on Polaris10
  drm/amd/powerplay: fix bug dpm can't work when resume back on Polaris
  ...
commit a64424d722
Dave Airlie, 2016-05-06 14:17:22 +10:00
185 changed files with 61926 additions and 3587 deletions

View File

@ -3790,9 +3790,9 @@ L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~agd5f/linux
S: Supported
F: drivers/gpu/drm/radeon/
F: include/uapi/drm/radeon*
F: include/uapi/drm/radeon_drm.h
F: drivers/gpu/drm/amd/
F: include/uapi/drm/amdgpu*
F: include/uapi/drm/amdgpu_drm.h
DRM PANEL DRIVERS
M: Thierry Reding <thierry.reding@gmail.com>

View File

@ -34,7 +34,7 @@
#define mmACP_AZALIA_I2S_SELECT 0x51d4
int amd_acp_hw_init(void *cgs_device,
int amd_acp_hw_init(struct cgs_device *cgs_device,
unsigned acp_version_major, unsigned acp_version_minor)
{
unsigned int acp_mode = ACP_MODE_I2S;

View File

@ -28,7 +28,7 @@
#include "cgs_linux.h"
#include "cgs_common.h"
int amd_acp_hw_init(void *cgs_device,
int amd_acp_hw_init(struct cgs_device *cgs_device,
unsigned acp_version_major, unsigned acp_version_minor);
#endif /* _ACP_GFX_IF_H */

View File

@ -15,3 +15,13 @@ config DRM_AMDGPU_USERPTR
help
This option selects CONFIG_MMU_NOTIFIER if it isn't already
selected to enable full userptr support.
config DRM_AMDGPU_GART_DEBUGFS
bool "Allow GART access through debugfs"
depends on DRM_AMDGPU
depends on DEBUG_FS
default n
help
Selecting this option creates a debugfs file to inspect the mapped
pages. Uses more memory for housekeeping, enable only for debugging.
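
The "more memory for housekeeping" is per-page tracking that, as the amdgpu.h and amdgpu_gart.c hunks further down show, is only compiled in when the symbol is set; a condensed sketch of the pattern from those hunks:

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* remember the CPU page backing each GART entry so the
	 * debugfs file can dump the mappings later */
	adev->gart.pages[p] = pagelist[i];
#endif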

View File

@ -302,6 +302,8 @@ struct amdgpu_ring_funcs {
void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
/* pad the indirect buffer to the necessary number of dw */
void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
};
/*
@ -391,6 +393,14 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
/*
* TTM.
*/
#define AMDGPU_TTM_LRU_SIZE 20
struct amdgpu_mman_lru {
struct list_head *lru[TTM_NUM_MEM_TYPES];
struct list_head *swap_lru;
};
struct amdgpu_mman {
struct ttm_bo_global_ref bo_global_ref;
struct drm_global_reference mem_global_ref;
@ -408,6 +418,9 @@ struct amdgpu_mman {
struct amdgpu_ring *buffer_funcs_ring;
/* Scheduler entity for buffer moves */
struct amd_sched_entity entity;
/* custom LRU management */
struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
};
int amdgpu_copy_buffer(struct amdgpu_ring *ring,
@ -586,6 +599,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
struct fence *fence);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
@ -609,8 +625,9 @@ struct amdgpu_gart {
unsigned num_gpu_pages;
unsigned num_cpu_pages;
unsigned table_size;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
struct page **pages;
dma_addr_t *pages_addr;
#endif
bool ready;
const struct amdgpu_gart_funcs *gart_funcs;
};
@ -742,16 +759,19 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_VCE
};
extern struct amd_sched_backend_ops amdgpu_sched_ops;
extern const struct amd_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
struct amdgpu_job **job);
void amdgpu_job_free(struct amdgpu_job *job);
void amdgpu_job_free_func(struct kref *refcount);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f);
void amdgpu_job_timeout_func(struct work_struct *work);
struct amdgpu_ring {
struct amdgpu_device *adev;
@ -788,6 +808,9 @@ struct amdgpu_ring {
struct amdgpu_ctx *current_ctx;
enum amdgpu_ring_type type;
char name[16];
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
};
/*
@ -830,13 +853,6 @@ struct amdgpu_vm_pt {
uint64_t addr;
};
struct amdgpu_vm_id {
struct amdgpu_vm_manager_id *mgr_id;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct fence *flushed_updates;
};
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root va;
@ -862,7 +878,7 @@ struct amdgpu_vm {
struct amdgpu_vm_pt *page_tables;
/* for id and flush management per ring */
struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
/* protecting freed */
spinlock_t freed_lock;
@ -871,11 +887,18 @@ struct amdgpu_vm {
struct amd_sched_entity entity;
};
struct amdgpu_vm_manager_id {
struct amdgpu_vm_id {
struct list_head list;
struct fence *active;
struct fence *first;
struct amdgpu_sync active;
struct fence *last_flush;
struct amdgpu_ring *last_user;
atomic_long_t owner;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct fence *flushed_updates;
uint32_t gds_base;
uint32_t gds_size;
uint32_t gws_base;
@ -889,7 +912,7 @@ struct amdgpu_vm_manager {
struct mutex lock;
unsigned num_ids;
struct list_head ids_lru;
struct amdgpu_vm_manager_id ids[AMDGPU_NUM_VM];
struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
uint32_t max_pfn;
/* vram base address for page table entry */
@ -916,7 +939,7 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
unsigned *vm_id, uint64_t *vm_pd_addr);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
int amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
@ -1026,6 +1049,11 @@ void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
*/
#include "clearstate_defs.h"
struct amdgpu_rlc_funcs {
void (*enter_safe_mode)(struct amdgpu_device *adev);
void (*exit_safe_mode)(struct amdgpu_device *adev);
};
struct amdgpu_rlc {
/* for power gating */
struct amdgpu_bo *save_restore_obj;
@ -1044,6 +1072,24 @@ struct amdgpu_rlc {
uint64_t cp_table_gpu_addr;
volatile uint32_t *cp_table_ptr;
u32 cp_table_size;
/* safe mode for updating CG/PG state */
bool in_safe_mode;
const struct amdgpu_rlc_funcs *funcs;
/* for firmware data */
u32 save_and_restore_offset;
u32 clear_state_descriptor_offset;
u32 avail_scratch_ram_locations;
u32 reg_restore_list_size;
u32 reg_list_format_start;
u32 reg_list_format_separate_start;
u32 starting_offsets_start;
u32 reg_list_format_size_bytes;
u32 reg_list_size_bytes;
u32 *register_list_format;
u32 *register_restore;
};
struct amdgpu_mec {
@ -1582,9 +1628,11 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev);
/*
* UVD
*/
#define AMDGPU_MAX_UVD_HANDLES 10
#define AMDGPU_UVD_STACK_SIZE (1024*1024)
#define AMDGPU_UVD_HEAP_SIZE (1024*1024)
#define AMDGPU_DEFAULT_UVD_HANDLES 10
#define AMDGPU_MAX_UVD_HANDLES 40
#define AMDGPU_UVD_STACK_SIZE (200*1024)
#define AMDGPU_UVD_HEAP_SIZE (256*1024)
#define AMDGPU_UVD_SESSION_SIZE (50*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET 256
struct amdgpu_uvd {
@ -1592,6 +1640,7 @@ struct amdgpu_uvd {
void *cpu_addr;
uint64_t gpu_addr;
void *saved_bo;
unsigned max_handles;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
@ -1690,12 +1739,12 @@ static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
* Debugfs
*/
struct amdgpu_debugfs {
struct drm_info_list *files;
const struct drm_info_list *files;
unsigned num_files;
};
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
struct drm_info_list *files,
const struct drm_info_list *files,
unsigned nfiles);
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
@ -1854,15 +1903,8 @@ struct amdgpu_atcs {
/*
* CGS
*/
void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(void *cgs_device);
/*
* CGS
*/
void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(void *cgs_device);
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
/* GPU virtualization */
@ -1903,7 +1945,6 @@ struct amdgpu_device {
int usec_timeout;
const struct amdgpu_asic_funcs *asic_funcs;
bool shutdown;
bool suspend;
bool need_dma32;
bool accel_working;
struct work_struct reset_work;
@ -1912,7 +1953,7 @@ struct amdgpu_device {
struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
unsigned debugfs_count;
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_regs;
struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
struct amdgpu_atif atif;
struct amdgpu_atcs atcs;
@ -1925,7 +1966,6 @@ struct amdgpu_device {
/* BIOS */
uint8_t *bios;
bool is_atom_bios;
uint16_t bios_header_start;
struct amdgpu_bo *stollen_vga_memory;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@ -2181,6 +2221,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@ -2337,7 +2379,7 @@ static inline void amdgpu_unregister_atpx_handler(void) {}
* KMS
*/
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern int amdgpu_max_kms_ioctl;
extern const int amdgpu_max_kms_ioctl;
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
int amdgpu_driver_unload_kms(struct drm_device *dev);
@ -2396,5 +2438,4 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo);
#include "amdgpu_object.h"
#endif
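
For context on the custom LRU added above (amdgpu_mman_lru and the log2_size array): the idea is to bucket buffer objects into per-size-class LRU lists so TTM can evict small buffers before large ones. A minimal sketch of a bucket-selection helper, assuming a helper that maps the TTM bdev back to the amdgpu device (names are illustrative):

static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
	unsigned log2_size = min(ilog2(tbo->num_pages),
				 AMDGPU_TTM_LRU_SIZE - 1);

	/* one LRU bucket per power-of-two size class, clamped */
	return &adev->mman.log2_size[log2_size];
}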

View File

@ -463,13 +463,6 @@ static int acp_soft_reset(void *handle)
return 0;
}
static void acp_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "ACP STATUS\n");
}
static int acp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@ -494,7 +487,6 @@ const struct amd_ip_funcs acp_ip_funcs = {
.is_idle = acp_is_idle,
.wait_for_idle = acp_wait_for_idle,
.soft_reset = acp_soft_reset,
.print_status = acp_print_status,
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
};

View File

@ -30,7 +30,7 @@
struct amdgpu_acp {
struct device *parent;
void *cgs_device;
struct cgs_device *cgs_device;
struct amd_acp_private *private;
struct mfd_cell *acp_cell;
struct resource *acp_res;

View File

@ -234,16 +234,6 @@ amdgpu_atombios_get_hpd_info_from_gpio(struct amdgpu_device *adev,
return hpd;
}
static bool amdgpu_atombios_apply_quirks(struct amdgpu_device *adev,
uint32_t supported_device,
int *connector_type,
struct amdgpu_i2c_bus_rec *i2c_bus,
uint16_t *line_mux,
struct amdgpu_hpd *hpd)
{
return true;
}
static const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_DVII,
@ -514,11 +504,6 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
conn_id = le16_to_cpu(path->usConnObjectId);
if (!amdgpu_atombios_apply_quirks
(adev, le16_to_cpu(path->usDeviceTag), &connector_type,
&ddc_bus, &conn_id, &hpd))
continue;
amdgpu_display_add_connector(adev,
conn_id,
le16_to_cpu(path->usDeviceTag),
@ -699,6 +684,36 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
return ret;
}
union gfx_info {
ATOM_GFX_INFO_V2_1 info;
};
int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index = GetIndexIntoMasterTable(DATA, GFX_Info);
uint8_t frev, crev;
uint16_t data_offset;
int ret = -EINVAL;
if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
union gfx_info *gfx_info = (union gfx_info *)
(mode_info->atom_context->bios + data_offset);
adev->gfx.config.max_shader_engines = gfx_info->info.max_shader_engines;
adev->gfx.config.max_tile_pipes = gfx_info->info.max_tile_pipes;
adev->gfx.config.max_cu_per_sh = gfx_info->info.max_cu_per_sh;
adev->gfx.config.max_sh_per_se = gfx_info->info.max_sh_per_se;
adev->gfx.config.max_backends_per_se = gfx_info->info.max_backends_per_se;
adev->gfx.config.max_texture_channel_caches =
gfx_info->info.max_texture_channel_caches;
ret = 0;
}
return ret;
}
union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO info;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;

View File

@ -144,6 +144,8 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
struct amdgpu_atom_ss *ss,
int id, u32 clock);

View File

@ -141,7 +141,7 @@ out_cleanup:
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
{
int i;
int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
640 * 480 * 4,
720 * 480 * 4,
800 * 600 * 4,

View File

@ -349,7 +349,7 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
bool amdgpu_get_bios(struct amdgpu_device *adev)
{
bool r;
uint16_t tmp;
uint16_t tmp, bios_header_start;
r = amdgpu_atrm_get_bios(adev);
if (r == false)
@ -383,11 +383,11 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
goto free_bios;
}
adev->bios_header_start = RBIOS16(0x48);
if (!adev->bios_header_start) {
bios_header_start = RBIOS16(0x48);
if (!bios_header_start) {
goto free_bios;
}
tmp = adev->bios_header_start + 4;
tmp = bios_header_start + 4;
if (!memcmp(adev->bios + tmp, "ATOM", 4) ||
!memcmp(adev->bios + tmp, "MOTA", 4)) {
adev->is_atom_bios = true;

View File

@ -42,7 +42,7 @@ struct amdgpu_cgs_device {
struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev
static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
uint64_t *mc_start, uint64_t *mc_size,
uint64_t *mem_size)
{
@ -73,7 +73,7 @@ static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
return 0;
}
static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
uint64_t size,
uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *kmem_handle, uint64_t *mcaddr)
@ -102,7 +102,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
return ret;
}
static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
{
struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
@ -118,7 +118,7 @@ static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
return 0;
}
static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
uint64_t min_offset, uint64_t max_offset,
@ -208,7 +208,7 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
return ret;
}
static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@ -225,7 +225,7 @@ static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
return 0;
}
static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
uint64_t *mcaddr)
{
int r;
@ -246,7 +246,7 @@ static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
return r;
}
static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@ -258,7 +258,7 @@ static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
return r;
}
static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
void **map)
{
int r;
@ -271,7 +271,7 @@ static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
return r;
}
static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@ -283,20 +283,20 @@ static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
return r;
}
static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
CGS_FUNC_ADEV;
return RREG32(offset);
}
static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
uint32_t value)
{
CGS_FUNC_ADEV;
WREG32(offset, value);
}
static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
enum cgs_ind_reg space,
unsigned index)
{
@ -320,7 +320,7 @@ static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
return 0;
}
static void amdgpu_cgs_write_ind_register(void *cgs_device,
static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
enum cgs_ind_reg space,
unsigned index, uint32_t value)
{
@ -343,7 +343,7 @@ static void amdgpu_cgs_write_ind_register(void *cgs_device,
WARN(1, "Invalid indirect register space");
}
static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
{
CGS_FUNC_ADEV;
uint8_t val;
@ -353,7 +353,7 @@ static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
return val;
}
static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
{
CGS_FUNC_ADEV;
uint16_t val;
@ -363,7 +363,7 @@ static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
return val;
}
static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
unsigned addr)
{
CGS_FUNC_ADEV;
@ -374,7 +374,7 @@ static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
return val;
}
static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
uint8_t value)
{
CGS_FUNC_ADEV;
@ -382,7 +382,7 @@ static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
WARN(ret, "pci_write_config_byte error");
}
static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
uint16_t value)
{
CGS_FUNC_ADEV;
@ -390,7 +390,7 @@ static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
WARN(ret, "pci_write_config_word error");
}
static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
uint32_t value)
{
CGS_FUNC_ADEV;
@ -399,7 +399,7 @@ static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
}
static int amdgpu_cgs_get_pci_resource(void *cgs_device,
static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
enum cgs_resource_type resource_type,
uint64_t size,
uint64_t offset,
@ -433,7 +433,7 @@ static int amdgpu_cgs_get_pci_resource(void *cgs_device,
}
}
static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
unsigned table, uint16_t *size,
uint8_t *frev, uint8_t *crev)
{
@ -449,7 +449,7 @@ static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
return NULL;
}
static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
uint8_t *frev, uint8_t *crev)
{
CGS_FUNC_ADEV;
@ -462,7 +462,7 @@ static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
return -EINVAL;
}
static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
void *args)
{
CGS_FUNC_ADEV;
@ -471,33 +471,33 @@ static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
adev->mode_info.atom_context, table, args);
}
static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
int active)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_clock clock, unsigned freq)
{
/* TODO */
return 0;
}
static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_engine engine, int powered)
{
/* TODO */
@ -506,7 +506,7 @@ static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
enum cgs_clock clock,
struct cgs_clock_limits *limits)
{
@ -514,7 +514,7 @@ static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
return 0;
}
static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
const uint32_t *voltages)
{
DRM_ERROR("not implemented");
@ -565,7 +565,7 @@ static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
.process = cgs_process_irq,
};
static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
unsigned num_types,
cgs_irq_source_set_func_t set,
cgs_irq_handler_func_t handler,
@ -600,19 +600,19 @@ static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
return ret;
}
static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
CGS_FUNC_ADEV;
return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}
static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
CGS_FUNC_ADEV;
return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}
int amdgpu_cgs_set_clockgating_state(void *cgs_device,
int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state)
{
@ -633,7 +633,7 @@ int amdgpu_cgs_set_clockgating_state(void *cgs_device,
return r;
}
int amdgpu_cgs_set_powergating_state(void *cgs_device,
int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_powergating_state state)
{
@ -655,7 +655,7 @@ int amdgpu_cgs_set_powergating_state(void *cgs_device,
}
static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
CGS_FUNC_ADEV;
enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
@ -681,9 +681,10 @@ static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_CP_MEC_JT2:
if (adev->asic_type == CHIP_TONGA)
if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
|| adev->asic_type == CHIP_POLARIS10)
result = AMDGPU_UCODE_ID_CP_MEC2;
else if (adev->asic_type == CHIP_CARRIZO)
else
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_RLC_G:
@ -695,13 +696,13 @@ static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
return result;
}
static int amdgpu_cgs_get_firmware_info(void *cgs_device,
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
{
CGS_FUNC_ADEV;
if (CGS_UCODE_ID_SMU != type) {
if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
uint64_t gpu_addr;
uint32_t data_size;
const struct gfx_firmware_header_v1_0 *header;
@ -734,6 +735,7 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
const uint8_t *src;
const struct smc_firmware_header_v1_0 *hdr;
if (!adev->pm.fw) {
switch (adev->asic_type) {
case CHIP_TONGA:
strcpy(fw_name, "amdgpu/tonga_smc.bin");
@ -741,6 +743,18 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
case CHIP_FIJI:
strcpy(fw_name, "amdgpu/fiji_smc.bin");
break;
case CHIP_POLARIS11:
if (type == CGS_UCODE_ID_SMU)
strcpy(fw_name, "amdgpu/polaris11_smc.bin");
else if (type == CGS_UCODE_ID_SMU_SK)
strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
break;
case CHIP_POLARIS10:
if (type == CGS_UCODE_ID_SMU)
strcpy(fw_name, "amdgpu/polaris10_smc.bin");
else if (type == CGS_UCODE_ID_SMU_SK)
strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
@ -759,6 +773,7 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
adev->pm.fw = NULL;
return err;
}
}
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
@ -774,10 +789,11 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
return 0;
}
static int amdgpu_cgs_query_system_info(void *cgs_device,
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info)
{
CGS_FUNC_ADEV;
struct amdgpu_cu_info cu_info;
if (NULL == sys_info)
return -ENODEV;
@ -801,6 +817,10 @@ static int amdgpu_cgs_query_system_info(void *cgs_device,
case CGS_SYSTEM_INFO_PG_FLAGS:
sys_info->value = adev->pg_flags;
break;
case CGS_SYSTEM_INFO_GFX_CU_INFO:
amdgpu_asic_get_cu_info(adev, &cu_info);
sys_info->value = cu_info.number;
break;
default:
return -ENODEV;
}
@ -808,7 +828,7 @@ static int amdgpu_cgs_query_system_info(void *cgs_device,
return 0;
}
static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
struct cgs_display_info *info)
{
CGS_FUNC_ADEV;
@ -851,7 +871,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
}
static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
CGS_FUNC_ADEV;
@ -867,7 +887,7 @@ static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
*/
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
struct cgs_acpi_method_info *info)
{
CGS_FUNC_ADEV;
@ -1030,14 +1050,14 @@ error:
return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
struct cgs_acpi_method_info *info)
{
return -EIO;
}
#endif
int amdgpu_cgs_call_acpi_method(void *cgs_device,
int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
void *pinput, void *poutput,
@ -1121,7 +1141,7 @@ static const struct cgs_os_ops amdgpu_cgs_os_ops = {
amdgpu_cgs_irq_put
};
void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
struct amdgpu_cgs_device *cgs_device =
kmalloc(sizeof(*cgs_device), GFP_KERNEL);
@ -1135,10 +1155,10 @@ void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
cgs_device->adev = adev;
return cgs_device;
return (struct cgs_device *)cgs_device;
}
void amdgpu_cgs_destroy_device(void *cgs_device)
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
kfree(cgs_device);
}
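
Nearly everything in this file is the mechanical conversion of the opaque void *cgs_device handle to a typed struct cgs_device *. It works because the amdgpu wrapper embeds the generic handle, so the cast in CGS_FUNC_ADEV (top of this file) recovers the device; a sketch of the layout, with the base-first ordering inferred from the casts in this diff:

struct amdgpu_cgs_device {
	struct cgs_device base;		/* generic handle; kept first so the
					 * struct cgs_device * cast stays valid */
	struct amdgpu_device *adev;	/* backing amdgpu device */
};

#define CGS_FUNC_ADEV \
	struct amdgpu_device *adev = \
		((struct amdgpu_cgs_device *)cgs_device)->adev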

View File

@ -439,7 +439,7 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode = NULL;
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
int i;
struct mode_size {
static const struct mode_size {
int w;
int h;
} common_modes[17] = {

View File

@ -24,7 +24,6 @@
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
#include <linux/list_sort.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
@ -527,16 +526,6 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
return 0;
}
static int cmp_size_smaller_first(void *priv, struct list_head *a,
struct list_head *b)
{
struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head);
struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head);
/* Sort A before B if A is smaller. */
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
@ -553,18 +542,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
if (!error) {
amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
/* Sort the buffer list from the smallest to largest buffer,
* which affects the order of buffers in the LRU list.
* This assures that the smallest buffers are added first
* to the LRU list, so they are likely to be later evicted
* first, instead of large buffers whose eviction is more
* expensive.
*
* This slightly lowers the number of bytes moved by TTM
* per frame under memory pressure.
*/
list_sort(NULL, &parser->validated, cmp_size_smaller_first);
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
parser->fence);
@ -862,27 +839,26 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_fence *fence;
struct fence *fence;
struct amdgpu_job *job;
int r;
job = p->job;
p->job = NULL;
job->base.sched = &ring->sched;
job->base.s_entity = &p->ctx->rings[ring->idx].entity;
job->owner = p->filp;
fence = amd_sched_fence_create(job->base.s_entity, p->filp);
if (!fence) {
r = amd_sched_job_init(&job->base, &ring->sched,
&p->ctx->rings[ring->idx].entity,
amdgpu_job_timeout_func,
amdgpu_job_free_func,
p->filp, &fence);
if (r) {
amdgpu_job_free(job);
return -ENOMEM;
return r;
}
job->base.s_fence = fence;
p->fence = fence_get(&fence->base);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring,
&fence->base);
job->owner = p->filp;
p->fence = fence_get(fence);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
trace_amdgpu_cs_ioctl(job);

View File

@ -59,6 +59,8 @@ static const char *amdgpu_asic_name[] = {
"FIJI",
"CARRIZO",
"STONEY",
"POLARIS10",
"POLARIS11",
"LAST",
};
@ -942,15 +944,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
}
if (amdgpu_gart_size != -1) {
/* gtt size must be power of two and greater or equal to 32M */
/* gtt size must be greater or equal to 32M */
if (amdgpu_gart_size < 32) {
dev_warn(adev->dev, "gart size (%d) too small\n",
amdgpu_gart_size);
amdgpu_gart_size = -1;
} else if (!amdgpu_check_pot_argument(amdgpu_gart_size)) {
dev_warn(adev->dev, "gart size (%d) must be a power of 2\n",
amdgpu_gart_size);
amdgpu_gart_size = -1;
}
}
@ -1150,6 +1148,8 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
case CHIP_TOPAZ:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_CARRIZO:
case CHIP_STONEY:
if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
@ -1338,15 +1338,24 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
{
int i, r;
/* ungate SMC block first */
r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_status[i].valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
if (i != AMD_IP_BLOCK_TYPE_SMC) {
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
}
}
/* XXX handle errors */
r = adev->ip_blocks[i].funcs->suspend(adev);
/* XXX handle errors */
@ -2013,7 +2022,7 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev)
* Debugfs
*/
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
struct drm_info_list *files,
const struct drm_info_list *files,
unsigned nfiles)
{
unsigned i;
@ -2125,32 +2134,246 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
return result;
}
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
while (size) {
uint32_t value;
value = RREG32_PCIE(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
if (r)
return r;
WREG32_PCIE(*pos >> 2, value);
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
while (size) {
uint32_t value;
value = RREG32_DIDT(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
if (r)
return r;
WREG32_DIDT(*pos >> 2, value);
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
while (size) {
uint32_t value;
value = RREG32_SMC(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
if (r)
return r;
WREG32_SMC(*pos >> 2, value);
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
.write = amdgpu_debugfs_regs_write,
.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_didt_read,
.write = amdgpu_debugfs_regs_didt_write,
.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_pcie_read,
.write = amdgpu_debugfs_regs_pcie_write,
.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_smc_read,
.write = amdgpu_debugfs_regs_smc_write,
.llseek = default_llseek
};
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
&amdgpu_debugfs_regs_smc_fops,
};
static const char *debugfs_regs_names[] = {
"amdgpu_regs",
"amdgpu_regs_didt",
"amdgpu_regs_pcie",
"amdgpu_regs_smc",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
struct drm_minor *minor = adev->ddev->primary;
struct dentry *ent, *root = minor->debugfs_root;
unsigned i, j;
ent = debugfs_create_file("amdgpu_regs", S_IFREG | S_IRUGO, root,
adev, &amdgpu_debugfs_regs_fops);
if (IS_ERR(ent))
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
ent = debugfs_create_file(debugfs_regs_names[i],
S_IFREG | S_IRUGO, root,
adev, debugfs_regs[i]);
if (IS_ERR(ent)) {
for (j = 0; j < i; j++) {
debugfs_remove(adev->debugfs_regs[i]);
adev->debugfs_regs[i] = NULL;
}
return PTR_ERR(ent);
}
if (!i)
i_size_write(ent->d_inode, adev->rmmio_size);
adev->debugfs_regs = ent;
adev->debugfs_regs[i] = ent;
}
return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
debugfs_remove(adev->debugfs_regs);
adev->debugfs_regs = NULL;
unsigned i;
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
if (adev->debugfs_regs[i]) {
debugfs_remove(adev->debugfs_regs[i]);
adev->debugfs_regs[i] = NULL;
}
}
}
int amdgpu_debugfs_init(struct drm_minor *minor)
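
Each of the new register files treats the file position as a byte offset and transfers one dword per 4 bytes, per the *pos >> 2 indexing above. A hypothetical userspace sketch (the debugfs path and register offset are examples, not taken from this diff):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs_smc", O_RDONLY);
	uint32_t val;

	if (fd < 0)
		return 1;
	/* a pread() at byte offset off returns RREG32_SMC(off >> 2);
	 * both size and offset must be 4-byte aligned */
	if (pread(fd, &val, sizeof(val), 0x1000) == sizeof(val))
		printf("0x%08x\n", val);
	close(fd);
	return 0;
}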

View File

@ -131,12 +131,17 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
vblank->framedur_ns / 1000,
vblank->linedur_ns / 1000, stat, vpos, hpos);
/* set the flip status */
/* Do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
/* Set the flip status */
amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
/* Do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
amdgpuCrtc->crtc_id, amdgpuCrtc, work);
}
/*
@ -252,6 +257,9 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
amdgpu_crtc->pflip_works = work;
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
amdgpu_crtc->crtc_id, amdgpu_crtc, work);
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
@ -588,20 +596,20 @@ const struct drm_mode_config_funcs amdgpu_mode_funcs = {
.output_poll_changed = amdgpu_output_poll_changed
};
static struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
{ { UNDERSCAN_OFF, "off" },
{ UNDERSCAN_ON, "on" },
{ UNDERSCAN_AUTO, "auto" },
};
static struct drm_prop_enum_list amdgpu_audio_enum_list[] =
static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
{ { AMDGPU_AUDIO_DISABLE, "off" },
{ AMDGPU_AUDIO_ENABLE, "on" },
{ AMDGPU_AUDIO_AUTO, "auto" },
};
/* XXX support different dither options? spatial, temporal, both, etc. */
static struct drm_prop_enum_list amdgpu_dither_enum_list[] =
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{ { AMDGPU_FMT_DITHER_DISABLE, "off" },
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};

View File

@ -150,7 +150,7 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
vrefresh = amdgpu_crtc->hw_mode.vrefresh;
vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
break;
}
}

View File

@ -166,7 +166,7 @@ module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
static struct pci_device_id pciidlist[] = {
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
@ -277,6 +277,16 @@ static struct pci_device_id pciidlist[] = {
{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
/* stoney */
{0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},
/* Polaris11 */
{0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
/* Polaris10 */
{0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0, 0, 0}
};

View File

@ -198,7 +198,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
/* There is always exactly one thread signaling this fence slot */
fence = rcu_dereference_protected(*ptr, 1);
rcu_assign_pointer(*ptr, NULL);
RCU_INIT_POINTER(*ptr, NULL);
BUG_ON(!fence);
@ -352,9 +352,9 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
(unsigned long)ring);
ring->fence_drv.num_fences_mask = num_hw_submission - 1;
ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
ring->fence_drv.fences = kcalloc(num_hw_submission, sizeof(void *),
ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
GFP_KERNEL);
if (!ring->fence_drv.fences)
return -ENOMEM;
@ -639,7 +639,7 @@ static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
return 0;
}
static struct drm_info_list amdgpu_debugfs_fence_list[] = {
static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
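
Doubling the fence-slot allocation keeps the slot count a power of two, which is what lets a 64-bit sequence number map to its slot with a single AND. A minimal sketch of the lookup, assuming num_hw_submission is a power of two (the helper name is illustrative):

static struct fence **amdgpu_fence_slot(struct amdgpu_fence_driver *drv,
					uint64_t seq)
{
	/* num_fences_mask == num_hw_submission * 2 - 1 */
	return &drv->fences[seq & drv->num_fences_mask];
}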

View File

@ -238,10 +238,10 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
if (adev->gart.pages[p]) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
adev->gart.pages[p] = NULL;
adev->gart.pages_addr[p] = adev->dummy_page.addr;
page_base = adev->gart.pages_addr[p];
#endif
page_base = adev->dummy_page.addr;
if (!adev->gart.ptr)
continue;
@ -251,7 +251,6 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
}
mb();
amdgpu_gart_flush_gpu_tlb(adev, 0);
}
@ -287,10 +286,11 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
adev->gart.pages_addr[p] = dma_addr[i];
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
adev->gart.pages[p] = pagelist[i];
#endif
if (adev->gart.ptr) {
page_base = adev->gart.pages_addr[p];
page_base = dma_addr[i];
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
@ -312,11 +312,11 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
*/
int amdgpu_gart_init(struct amdgpu_device *adev)
{
int r, i;
int r;
if (adev->gart.pages) {
if (adev->dummy_page.page)
return 0;
}
/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("Page size is smaller than GPU page size!\n");
@ -330,22 +330,16 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
/* Allocate pages table */
adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
if (adev->gart.pages == NULL) {
amdgpu_gart_fini(adev);
return -ENOMEM;
}
adev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
adev->gart.num_cpu_pages);
if (adev->gart.pages_addr == NULL) {
amdgpu_gart_fini(adev);
return -ENOMEM;
}
/* set GART entry to point to the dummy page by default */
for (i = 0; i < adev->gart.num_cpu_pages; i++) {
adev->gart.pages_addr[i] = adev->dummy_page.addr;
}
#endif
return 0;
}
@ -358,15 +352,14 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
*/
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
if (adev->gart.pages && adev->gart.pages_addr && adev->gart.ready) {
if (adev->gart.ready) {
/* unbind pages */
amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
}
adev->gart.ready = false;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
vfree(adev->gart.pages);
vfree(adev->gart.pages_addr);
adev->gart.pages = NULL;
adev->gart.pages_addr = NULL;
#endif
amdgpu_dummy_page_fini(adev);
}

View File

@ -797,7 +797,7 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
return 0;
}
static struct drm_info_list amdgpu_debugfs_gem_list[] = {
static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

View File

@ -124,7 +124,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ctx *ctx, *old_ctx;
struct amdgpu_vm *vm;
struct fence *hwf;
unsigned i;
unsigned i, patch_offset = ~0;
int r = 0;
if (num_ibs == 0)
@ -149,17 +150,27 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return r;
}
if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (vm) {
/* do context switch */
amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
r = amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
ib->gds_base, ib->gds_size,
ib->gws_base, ib->gws_size,
ib->oa_base, ib->oa_size);
if (r) {
amdgpu_ring_undo(ring);
return r;
}
if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);
}
/* always set cond_exec_polling to CONTINUE */
*ring->cond_exe_cpu_addr = 1;
old_ctx = ring->current_ctx;
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
@ -201,6 +212,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (f)
*f = fence_get(hwf);
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
amdgpu_ring_commit(ring);
return 0;
}
@ -315,7 +329,7 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
}
static struct drm_info_list amdgpu_debugfs_sa_list[] = {
static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

View File

@ -498,7 +498,7 @@ static int amdgpu_irqdomain_map(struct irq_domain *d,
return 0;
}
static struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
.map = amdgpu_irqdomain_map,
};

View File

@ -28,6 +28,23 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
static void amdgpu_job_free_handler(struct work_struct *ws)
{
struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
amd_sched_job_put(&job->base);
}
void amdgpu_job_timeout_func(struct work_struct *work)
{
struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
job->base.sched->name,
(uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
job->ring->fence_drv.sync_seq);
amd_sched_job_put(&job->base);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job)
{
@ -45,6 +62,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->adev = adev;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
amdgpu_sync_create(&(*job)->sync);
@ -80,6 +98,14 @@ void amdgpu_job_free(struct amdgpu_job *job)
amdgpu_bo_unref(&job->uf.bo);
amdgpu_sync_free(&job->sync);
if (!job->base.use_sched)
kfree(job);
}
void amdgpu_job_free_func(struct kref *refcount)
{
struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
kfree(job);
}
@ -87,16 +113,23 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f)
{
struct fence *fence;
int r;
job->ring = ring;
job->base.sched = &ring->sched;
job->base.s_entity = entity;
job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
if (!job->base.s_fence)
return -ENOMEM;
*f = fence_get(&job->base.s_fence->base);
if (!f)
return -EINVAL;
r = amd_sched_job_init(&job->base, &ring->sched,
entity,
amdgpu_job_timeout_func,
amdgpu_job_free_func,
owner, &fence);
if (r)
return r;
job->owner = owner;
*f = fence_get(fence);
amd_sched_entity_push_job(&job->base);
return 0;
@ -165,7 +198,9 @@ err:
return fence;
}
struct amd_sched_backend_ops amdgpu_sched_ops = {
const struct amd_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
.begin_job = amd_sched_job_begin,
.finish_job = amd_sched_job_finish,
};

View File

@ -755,4 +755,4 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);

View File

@ -53,7 +53,7 @@ struct amdgpu_hpd;
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
#define AMDGPU_MAX_AFMT_BLOCKS 7
#define AMDGPU_MAX_AFMT_BLOCKS 9
enum amdgpu_rmx_type {
RMX_OFF,
@ -309,8 +309,8 @@ struct amdgpu_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
bool mode_config_initialized;
struct amdgpu_crtc *crtcs[6];
struct amdgpu_afmt *afmt[7];
struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */

View File

@ -71,7 +71,7 @@ static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
int r;
r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(bo->adev->dev, "%p reserve failed\n", bo);

View File

@ -362,16 +362,23 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
uint32_t i, mask = 0;
char sub_str[2];
ret = kstrtol(buf, 0, &level);
for (i = 0; i < strlen(buf) - 1; i++) {
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
if (ret) {
count = -EINVAL;
goto fail;
}
mask |= 1 << level;
}
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, level);
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
fail:
return count;
}
@ -399,16 +406,23 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
uint32_t i, mask = 0;
char sub_str[2];
ret = kstrtol(buf, 0, &level);
for (i = 0; i < strlen(buf) - 1; i++) {
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
if (ret) {
count = -EINVAL;
goto fail;
}
mask |= 1 << level;
}
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, level);
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
fail:
return count;
}
@ -436,16 +450,23 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
uint32_t i, mask = 0;
char sub_str[2];
ret = kstrtol(buf, 0, &level);
for (i = 0; i < strlen(buf) - 1; i++) {
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
if (ret) {
count = -EINVAL;
goto fail;
}
mask |= 1 << level;
}
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, level);
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
fail:
return count;
}
@ -1212,7 +1233,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
return 0;
}
static struct drm_info_list amdgpu_pm_info_list[] = {
static const struct drm_info_list amdgpu_pm_info_list[] = {
{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif
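
The rewritten pp_dpm_sclk/mclk/pcie store handlers parse the written string one character at a time and OR every parsed level into a bitmask, so several DPM levels can be forced at once (the "dpm force multiple levels" item in the shortlog). A hypothetical userspace port of the loop, for illustration only; note the final character (the sysfs newline) is skipped:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_level_mask(const char *buf, uint32_t *mask)
{
	char sub_str[2] = { 0, 0 };
	size_t i;

	*mask = 0;
	for (i = 0; i + 1 < strlen(buf); i++) {
		char *end;
		long level;

		sub_str[0] = buf[i];
		level = strtol(sub_str, &end, 0);
		if (end == sub_str)	/* reject non-digit characters */
			return -1;
		*mask |= 1u << level;
	}
	return 0;
}

int main(void)
{
	uint32_t mask;

	/* "023\n" selects levels 0, 2 and 3 -> mask 0x0d */
	if (!parse_level_mask("023\n", &mask))
		printf("mask = 0x%02x\n", mask);
	return 0;
}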

View File

@ -99,6 +99,10 @@ static int amdgpu_pp_early_init(void *handle)
#ifdef CONFIG_DRM_AMD_POWERPLAY
switch (adev->asic_type) {
case CHIP_POLARIS11:
case CHIP_POLARIS10:
adev->pp_enabled = true;
break;
case CHIP_TONGA:
case CHIP_FIJI:
adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
@ -299,15 +303,6 @@ static int amdgpu_pp_soft_reset(void *handle)
return ret;
}
static void amdgpu_pp_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->print_status)
adev->powerplay.ip_funcs->print_status(
adev->powerplay.pp_handle);
}
const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.early_init = amdgpu_pp_early_init,
.late_init = amdgpu_pp_late_init,
@ -320,7 +315,6 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.is_idle = amdgpu_pp_is_idle,
.wait_for_idle = amdgpu_pp_wait_for_idle,
.soft_reset = amdgpu_pp_soft_reset,
.print_status = amdgpu_pp_print_status,
.set_clockgating_state = amdgpu_pp_set_clockgating_state,
.set_powergating_state = amdgpu_pp_set_powergating_state,
};

View File

@ -46,7 +46,8 @@
* wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again.
*/
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
/**
* amdgpu_ring_alloc - allocate space on the ring buffer
@ -215,18 +216,17 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring,
*
* @adev: amdgpu_device pointer
* @ring: amdgpu_ring structure holding ring information
* @ring_size: size of the ring
* @max_dw: maximum number of dw for ring alloc
* @nop: nop packet for this ring
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, u32 nop, u32 align_mask,
unsigned max_dw, u32 nop, u32 align_mask,
struct amdgpu_irq_src *irq_src, unsigned irq_type,
enum amdgpu_ring_type ring_type)
{
u32 rb_bufsz;
int r;
if (ring->adev == NULL) {
@ -265,8 +265,17 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
return r;
}
ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4;
ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
if (r) {
dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
return r;
}
ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
spin_lock_init(&ring->fence_lock);
r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
if (r) {
@ -274,10 +283,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
/* Align ring size */
rb_bufsz = order_base_2(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
ring->ring_size = ring_size;
ring->ring_size = roundup_pow_of_two(max_dw * 4 *
amdgpu_sched_hw_submission);
ring->align_mask = align_mask;
ring->nop = nop;
ring->type = ring_type;
@ -310,8 +317,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4,
amdgpu_sched_hw_submission);
ring->max_dw = max_dw;
if (amdgpu_debugfs_ring_init(adev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
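
With this hunk the caller passes the maximum dword count per submission instead of a raw byte size, and the ring is sized as a power of two large enough for amdgpu_sched_hw_submission in-flight submissions. A quick worked example, assuming the default hw_submission value of 2 (a tunable module parameter) and the 1024-dw SDMA rings changed later in this series:

#include <stdio.h>
#include <stdint.h>

/* Round up to the next power of two, like the kernel's roundup_pow_of_two(). */
static uint32_t roundup_pow_of_two_u32(uint32_t x)
{
	uint32_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned max_dw = 1024;		/* e.g. the SDMA rings below */
	unsigned hw_submission = 2;	/* assumed default amdgpu_sched_hw_submission */
	uint32_t ring_size = roundup_pow_of_two_u32(max_dw * 4 * hw_submission);

	/* 1024 dw * 4 bytes * 2 submissions = 8192, already a power of two */
	printf("ring_size = %u bytes, ptr_mask = 0x%x\n",
	       ring_size, ring_size / 4 - 1);
	return 0;
}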
@ -363,9 +369,8 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
int roffset = *(int*)node->info_ent->data;
int roffset = (unsigned long)node->info_ent->data;
struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
uint32_t rptr, wptr, rptr_next;
unsigned i;
@ -408,46 +413,37 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
return 0;
}
/* TODO: clean this up !*/
static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
static struct drm_info_list amdgpu_debugfs_ring_info_list[] = {
{"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index},
{"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index},
{"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index},
{"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index},
{"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index},
{"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index},
{"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index},
{"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index},
};
static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS];
static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32];
#endif
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
unsigned offset = (uint8_t*)ring - (uint8_t*)adev;
unsigned i;
struct drm_info_list *info;
char *name;
for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i];
int roffset = *(int*)amdgpu_debugfs_ring_info_list[i].data;
struct amdgpu_ring *other = (void *)(((uint8_t*)adev) + roffset);
unsigned r;
if (other != ring)
continue;
r = amdgpu_debugfs_add_files(adev, info, 1);
if (r)
return r;
info = &amdgpu_debugfs_ring_info_list[i];
if (!info->data)
break;
}
if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list))
return -ENOSPC;
name = &amdgpu_debugfs_ring_names[i][0];
sprintf(name, "amdgpu_ring_%s", ring->name);
info->name = name;
info->show = amdgpu_debugfs_ring_info;
info->driver_features = 0;
info->data = (void*)(uintptr_t)offset;
return amdgpu_debugfs_add_files(adev, info, 1);
#endif
return 0;
}
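
Instead of a fixed table of eight hard-coded ring offsets, the debugfs list is now filled lazily: the first entry with a NULL data pointer is claimed, named after the ring, and handed to amdgpu_debugfs_add_files(). A stripped-down model of that slot allocation, with placeholder array sizes:

#include <stdio.h>
#include <string.h>

#define MAX_RINGS 16	/* stands in for AMDGPU_MAX_RINGS */

struct info_entry {
	char name[32];
	void *data;
};

static struct info_entry ring_info[MAX_RINGS];

/* Claim the first unused slot, as amdgpu_debugfs_ring_init() now does. */
static int register_ring(const char *ring_name, void *ring)
{
	unsigned i;

	for (i = 0; i < MAX_RINGS; i++)
		if (!ring_info[i].data)
			break;
	if (i == MAX_RINGS)
		return -1;	/* -ENOSPC in the kernel */

	snprintf(ring_info[i].name, sizeof(ring_info[i].name),
		 "amdgpu_ring_%s", ring_name);
	ring_info[i].data = ring;
	return 0;
}

int main(void)
{
	int gfx, sdma0;

	register_ring("gfx", &gfx);
	register_ring("sdma0", &sdma0);
	printf("%s %s\n", ring_info[0].name, ring_info[1].name);
	return 0;
}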


@ -108,6 +108,29 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
*keep = fence_get(fence);
}
/**
* amdgpu_sync_add_later - add the fence to the hash
*
* @sync: sync object to add the fence to
* @f: fence to add
*
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
{
struct amdgpu_sync_entry *e;
hash_for_each_possible(sync->fences, e, node, f->context) {
if (unlikely(e->fence->context != f->context))
continue;
amdgpu_sync_keep_later(&e->fence, f);
return true;
}
return false;
}
/**
* amdgpu_sync_fence - remember to sync to this fence
*
@ -127,13 +150,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
amdgpu_sync_keep_later(&sync->last_vm_update, f);
hash_for_each_possible(sync->fences, e, node, f->context) {
if (unlikely(e->fence->context != f->context))
continue;
amdgpu_sync_keep_later(&e->fence, f);
if (amdgpu_sync_add_later(sync, f))
return 0;
}
e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
if (!e)
@ -204,6 +222,81 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
return r;
}
/**
* amdgpu_sync_is_idle - test if all fences are signaled
*
* @sync: the sync object
*
* Returns true if all fences in the sync object are signaled.
*/
bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
struct fence *f = e->fence;
if (fence_is_signaled(f)) {
hash_del(&e->node);
fence_put(f);
kmem_cache_free(amdgpu_sync_slab, e);
continue;
}
return false;
}
return true;
}
/**
* amdgpu_sync_cycle_fences - move fences from one sync object into another
*
* @dst: the destination sync object
* @src: the source sync object
* @fence: fence to add to source
*
* Remove all fences from source and put them into destination and add
* fence as new one into source.
*/
int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
struct fence *fence)
{
struct amdgpu_sync_entry *e, *newone;
struct hlist_node *tmp;
int i;
/* Allocate the new entry before moving the old ones */
newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
if (!newone)
return -ENOMEM;
hash_for_each_safe(src->fences, i, tmp, e, node) {
struct fence *f = e->fence;
hash_del(&e->node);
if (fence_is_signaled(f)) {
fence_put(f);
kmem_cache_free(amdgpu_sync_slab, e);
continue;
}
if (amdgpu_sync_add_later(dst, f)) {
kmem_cache_free(amdgpu_sync_slab, e);
continue;
}
hash_add(dst->fences, &e->node, f->context);
}
hash_add(src->fences, &newone->node, fence->context);
newone->fence = fence_get(fence);
return 0;
}
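
amdgpu_sync_cycle_fences() above combines three steps: drop signaled fences, migrate the live ones into dst (merging per-context via amdgpu_sync_add_later()), and seed src with the new fence. A simplified standalone model of that recycle step, using a plain array and a signaled flag as stand-ins for struct fence and the hash table (illustrative assumptions, not the kernel API):

#include <stdio.h>
#include <stdbool.h>

struct toy_fence { unsigned context; bool signaled; };

#define MAX_FENCES 8

struct toy_sync { struct toy_fence *f[MAX_FENCES]; };

/* Keep at most one fence per context, preferring the later one -
 * a stand-in for amdgpu_sync_keep_later()/amdgpu_sync_add_later(). */
static bool toy_add_later(struct toy_sync *s, struct toy_fence *f)
{
	for (int i = 0; i < MAX_FENCES; i++)
		if (s->f[i] && s->f[i]->context == f->context) {
			s->f[i] = f;
			return true;
		}
	return false;
}

static void toy_add(struct toy_sync *s, struct toy_fence *f)
{
	if (toy_add_later(s, f))
		return;
	for (int i = 0; i < MAX_FENCES; i++)
		if (!s->f[i]) { s->f[i] = f; return; }
}

/* Move live fences from src to dst and seed src with the new fence,
 * mirroring the shape of amdgpu_sync_cycle_fences(). */
static void toy_cycle(struct toy_sync *dst, struct toy_sync *src,
		      struct toy_fence *fence)
{
	for (int i = 0; i < MAX_FENCES; i++) {
		struct toy_fence *f = src->f[i];

		if (!f)
			continue;
		src->f[i] = NULL;
		if (f->signaled)
			continue;	/* dropped, like fence_put() */
		toy_add(dst, f);
	}
	toy_add(src, fence);
}

int main(void)
{
	struct toy_fence a = { .context = 1, .signaled = true };
	struct toy_fence b = { .context = 2, .signaled = false };
	struct toy_fence n = { .context = 3, .signaled = false };
	struct toy_sync dst = {0}, src = {0};

	toy_add(&src, &a);
	toy_add(&src, &b);
	toy_cycle(&dst, &src, &n);	/* a dropped, b moved to dst, n seeds src */
	for (int i = 0; i < MAX_FENCES; i++)
		if (dst.f[i])
			printf("dst: ctx %u\n", dst.f[i]->context);
	for (int i = 0; i < MAX_FENCES; i++)
		if (src.f[i])
			printf("src: ctx %u\n", src.f[i]->context);
	return 0;
}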
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;


@ -909,6 +909,52 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
return flags;
}
static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
unsigned i, j;
for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
if (&tbo->lru == lru->lru[j])
lru->lru[j] = tbo->lru.prev;
if (&tbo->swap == lru->swap_lru)
lru->swap_lru = tbo->swap.prev;
}
}
static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
unsigned log2_size = min(ilog2(tbo->num_pages),
AMDGPU_TTM_LRU_SIZE - 1);
return &adev->mman.log2_size[log2_size];
}
static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
{
struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
struct list_head *res = lru->lru[tbo->mem.mem_type];
lru->lru[tbo->mem.mem_type] = &tbo->lru;
return res;
}
static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
{
struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
struct list_head *res = lru->swap_lru;
lru->swap_lru = &tbo->swap;
return res;
}
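
The new hooks bucket BOs by the log2 of their page count, so TTM's eviction walk sees similarly sized objects together; amdgpu_ttm_lru() clamps the index to AMDGPU_TTM_LRU_SIZE - 1 so oversized BOs share the last bucket. A worked sketch of the bucket selection, with the bucket count picked arbitrarily for illustration:

#include <stdio.h>

#define AMDGPU_TTM_LRU_SIZE 12	/* illustrative value; see amdgpu.h for the real one */

/* Integer log2, like the kernel's ilog2() for the cases used here. */
static unsigned ilog2_u(unsigned long v)
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned lru_bucket(unsigned long num_pages)
{
	unsigned log2_size = ilog2_u(num_pages);

	return log2_size < AMDGPU_TTM_LRU_SIZE - 1 ?
	       log2_size : AMDGPU_TTM_LRU_SIZE - 1;
}

int main(void)
{
	/* 1-page BO -> bucket 0; 300 pages -> ilog2(300) = 8;
	 * huge BOs all land in the last bucket. */
	printf("%u %u %u\n", lru_bucket(1), lru_bucket(300),
	       lru_bucket(1ul << 20));
	return 0;
}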
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
@ -922,10 +968,14 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
.lru_removal = &amdgpu_ttm_lru_removal,
.lru_tail = &amdgpu_ttm_lru_tail,
.swap_lru_tail = &amdgpu_ttm_swap_lru_tail,
};
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
unsigned i, j;
int r;
r = amdgpu_ttm_global_init(adev);
@ -943,6 +993,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
}
for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
lru->lru[j] = &adev->mman.bdev.man[j].lru;
lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
}
adev->mman.initialized = true;
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
adev->mc.real_vram_size >> PAGE_SHIFT);
@ -1165,7 +1224,7 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;
static struct drm_info_list amdgpu_ttm_debugfs_list[] = {
static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
@ -1216,6 +1275,8 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
.llseek = default_llseek
};
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@ -1263,6 +1324,8 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
#endif
#endif
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
@ -1278,6 +1341,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
i_size_write(ent->d_inode, adev->mc.mc_vram_size);
adev->mman.vram = ent;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
adev, &amdgpu_ttm_gtt_fops);
if (IS_ERR(ent))
@ -1285,6 +1349,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
i_size_write(ent->d_inode, adev->mc.gtt_size);
adev->mman.gtt = ent;
#endif
count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
#ifdef CONFIG_SWIOTLB
@ -1306,7 +1371,10 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
debugfs_remove(adev->mman.vram);
adev->mman.vram = NULL;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
debugfs_remove(adev->mman.gtt);
adev->mman.gtt = NULL;
#endif
#endif
}


@ -54,6 +54,8 @@
#define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI "amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
/**
* amdgpu_uvd_cs_ctx - Command submission parser context
@ -85,6 +87,8 @@ MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@ -131,6 +135,12 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_STONEY:
fw_name = FIRMWARE_STONEY;
break;
case CHIP_POLARIS10:
fw_name = FIRMWARE_POLARIS10;
break;
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
default:
return -EINVAL;
}
@ -151,6 +161,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return r;
}
/* Set the default UVD handles that the firmware can handle */
adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
@ -158,8 +171,19 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
version_major, version_minor, family_id);
/*
* Limit the number of UVD handles depending on microcode major
* and minor versions. Firmware 1.80 is the first version with
* support for 40 UVD handles; all subsequent versions retain
* that support.
*/
if ((version_major > 0x01) ||
((version_major == 0x01) && (version_minor >= 0x50)))
adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+ AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
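
The handle limit is derived from the firmware header: the major and minor versions live in the top bytes of ucode_version, and anything at or past 1.80 (major 0x01, minor 0x50) gets the full AMDGPU_MAX_UVD_HANDLES instead of the default. A small decode sketch; the handle counts of 10 and 40 are taken from the driver's defines as I read them, so treat them as assumptions:

#include <stdio.h>
#include <stdint.h>

#define AMDGPU_DEFAULT_UVD_HANDLES 10	/* assumed values, */
#define AMDGPU_MAX_UVD_HANDLES     40	/* per amdgpu_uvd.h */

static unsigned uvd_max_handles(uint32_t ucode_version)
{
	unsigned version_minor = (ucode_version >> 8) & 0xff;
	unsigned version_major = (ucode_version >> 24) & 0xff;

	if ((version_major > 0x01) ||
	    ((version_major == 0x01) && (version_minor >= 0x50)))
		return AMDGPU_MAX_UVD_HANDLES;
	return AMDGPU_DEFAULT_UVD_HANDLES;
}

int main(void)
{
	/* 1.79 keeps the default; 1.80 (0x50 == 80) unlocks 40 handles */
	printf("%u %u\n", uvd_max_handles(0x01004f00),
	       uvd_max_handles(0x01005000));
	return 0;
}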
@ -202,7 +226,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return r;
}
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
for (i = 0; i < adev->uvd.max_handles; ++i) {
atomic_set(&adev->uvd.handles[i], 0);
adev->uvd.filp[i] = NULL;
}
@ -248,7 +272,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (adev->uvd.vcpu_bo == NULL)
return 0;
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i]))
break;
@ -303,7 +327,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
struct amdgpu_ring *ring = &adev->uvd.ring;
int i, r;
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]);
if (handle != 0 && adev->uvd.filp[i] == filp) {
struct fence *fence;
@ -563,7 +587,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
amdgpu_bo_kunmap(bo);
/* try to alloc a new handle */
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
for (i = 0; i < adev->uvd.max_handles; ++i) {
if (atomic_read(&adev->uvd.handles[i]) == handle) {
DRM_ERROR("Handle 0x%x already in use!\n", handle);
return -EINVAL;
@ -586,7 +610,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
return r;
/* validate the handle */
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
for (i = 0; i < adev->uvd.max_handles; ++i) {
if (atomic_read(&adev->uvd.handles[i]) == handle) {
if (adev->uvd.filp[i] != ctx->parser->filp) {
DRM_ERROR("UVD handle collision detected!\n");
@ -601,7 +625,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
case 2:
/* it's a destroy msg, free the handle */
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
for (i = 0; i < adev->uvd.max_handles; ++i)
atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
amdgpu_bo_kunmap(bo);
return 0;
@ -1013,7 +1037,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i]))
++handles;


@ -50,6 +50,8 @@
#define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI "amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@ -62,6 +64,8 @@ MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
@ -113,6 +117,12 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
case CHIP_STONEY:
fw_name = FIRMWARE_STONEY;
break;
case CHIP_POLARIS10:
fw_name = FIRMWARE_POLARIS10;
break;
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
default:
return -EINVAL;


@ -166,74 +166,109 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
{
uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vm_id *id = &vm->ids[ring->idx];
struct fence *updates = sync->last_vm_update;
struct amdgpu_vm_id *id;
unsigned i = ring->idx;
int r;
mutex_lock(&adev->vm_manager.lock);
/* check if the id is still valid */
if (id->mgr_id) {
struct fence *flushed = id->flushed_updates;
bool is_later;
long owner;
/* Check if we can use a VMID already assigned to this VM */
do {
struct fence *flushed;
if (!flushed)
is_later = true;
else if (!updates)
is_later = false;
else
is_later = fence_is_later(updates, flushed);
id = vm->ids[i++];
if (i == AMDGPU_MAX_RINGS)
i = 0;
owner = atomic_long_read(&id->mgr_id->owner);
if (!is_later && owner == (long)id &&
pd_addr == id->pd_gpu_addr) {
/* Check all the prerequisites to using this VMID */
if (!id)
continue;
if (atomic_long_read(&id->owner) != (long)vm)
continue;
if (pd_addr != id->pd_gpu_addr)
continue;
if (id->last_user != ring &&
(!id->last_flush || !fence_is_signaled(id->last_flush)))
continue;
flushed = id->flushed_updates;
if (updates && (!flushed || fence_is_later(updates, flushed)))
continue;
/* Good we can use this VMID */
if (id->last_user == ring) {
r = amdgpu_sync_fence(ring->adev, sync,
id->mgr_id->active);
if (r) {
mutex_unlock(&adev->vm_manager.lock);
return r;
id->first);
if (r)
goto error;
}
fence_put(id->mgr_id->active);
id->mgr_id->active = fence_get(fence);
/* And remember this submission as user of the VMID */
r = amdgpu_sync_fence(ring->adev, &id->active, fence);
if (r)
goto error;
list_move_tail(&id->mgr_id->list,
&adev->vm_manager.ids_lru);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
vm->ids[ring->idx] = id;
*vm_id = id->mgr_id - adev->vm_manager.ids;
*vm_id = id - adev->vm_manager.ids;
*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
*vm_pd_addr);
trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
mutex_unlock(&adev->vm_manager.lock);
return 0;
}
}
id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
struct amdgpu_vm_manager_id,
} while (i != ring->idx);
id = list_first_entry(&adev->vm_manager.ids_lru,
struct amdgpu_vm_id,
list);
r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
if (!r) {
fence_put(id->mgr_id->active);
id->mgr_id->active = fence_get(fence);
if (!amdgpu_sync_is_idle(&id->active)) {
struct list_head *head = &adev->vm_manager.ids_lru;
struct amdgpu_vm_id *tmp;
list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
list) {
if (amdgpu_sync_is_idle(&id->active)) {
list_move(&id->list, head);
head = &id->list;
}
}
id = list_first_entry(&adev->vm_manager.ids_lru,
struct amdgpu_vm_id,
list);
}
r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
if (r)
goto error;
fence_put(id->first);
id->first = fence_get(fence);
fence_put(id->last_flush);
id->last_flush = NULL;
fence_put(id->flushed_updates);
id->flushed_updates = fence_get(updates);
id->pd_gpu_addr = pd_addr;
list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
atomic_long_set(&id->mgr_id->owner, (long)id);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
id->last_user = ring;
atomic_long_set(&id->owner, (long)vm);
vm->ids[ring->idx] = id;
*vm_id = id->mgr_id - adev->vm_manager.ids;
*vm_id = id - adev->vm_manager.ids;
*vm_pd_addr = pd_addr;
trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
}
error:
mutex_unlock(&adev->vm_manager.lock);
return r;
}
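
The rewritten grab path only recycles a VMID when four checks pass: the id still belongs to this VM, the page directory address is unchanged, any flush issued from another ring has signaled, and no newer page-table updates are pending. A condensed sketch of that gate over simplified types; the real checks operate on fences and rings as shown above:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

struct toy_vm_id {
	const void *owner;	/* the VM that last used this id */
	uint64_t pd_gpu_addr;
	bool last_flush_signaled;
	bool updates_flushed;	/* flushed_updates not older than updates */
	const void *last_user;	/* ring that used the id last */
};

/* Mirrors the continue-chain at the top of amdgpu_vm_grab_id(). */
static bool vmid_reusable(const struct toy_vm_id *id, const void *vm,
			  uint64_t pd_addr, const void *ring)
{
	if (id->owner != vm)
		return false;
	if (id->pd_gpu_addr != pd_addr)
		return false;
	if (id->last_user != ring && !id->last_flush_signaled)
		return false;
	return id->updates_flushed;
}

int main(void)
{
	int vm, ring_a, ring_b;
	struct toy_vm_id id = {
		.owner = &vm, .pd_gpu_addr = 0x1000,
		.last_flush_signaled = false, .updates_flushed = true,
		.last_user = &ring_a,
	};

	/* same ring: ok even with an unsignaled flush; other ring: not yet */
	printf("%d %d\n", vmid_reusable(&id, &vm, 0x1000, &ring_a),
	       vmid_reusable(&id, &vm, 0x1000, &ring_b));
	return 0;
}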
@ -247,43 +282,60 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
*
* Emit a VM flush when it is necessary.
*/
void amdgpu_vm_flush(struct amdgpu_ring *ring,
int amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
bool gds_switch_needed = ring->funcs->emit_gds_switch && (
mgr_id->gds_base != gds_base ||
mgr_id->gds_size != gds_size ||
mgr_id->gws_base != gws_base ||
mgr_id->gws_size != gws_size ||
mgr_id->oa_base != oa_base ||
mgr_id->oa_size != oa_size);
id->gds_base != gds_base ||
id->gds_size != gds_size ||
id->gws_base != gws_base ||
id->gws_size != gws_size ||
id->oa_base != oa_base ||
id->oa_size != oa_size);
int r;
if (ring->funcs->emit_pipeline_sync && (
pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
amdgpu_ring_emit_pipeline_sync(ring);
if (pd_addr != AMDGPU_VM_NO_FLUSH) {
struct fence *fence;
trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
mutex_lock(&adev->vm_manager.lock);
if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) {
r = amdgpu_fence_emit(ring, &fence);
if (r) {
mutex_unlock(&adev->vm_manager.lock);
return r;
}
fence_put(id->last_flush);
id->last_flush = fence;
}
mutex_unlock(&adev->vm_manager.lock);
}
if (gds_switch_needed) {
mgr_id->gds_base = gds_base;
mgr_id->gds_size = gds_size;
mgr_id->gws_base = gws_base;
mgr_id->gws_size = gws_size;
mgr_id->oa_base = oa_base;
mgr_id->oa_size = oa_size;
id->gds_base = gds_base;
id->gds_size = gds_size;
id->gws_base = gws_base;
id->gws_size = gws_size;
id->oa_base = oa_base;
id->oa_size = oa_size;
amdgpu_ring_emit_gds_switch(ring, vm_id,
gds_base, gds_size,
gws_base, gws_size,
oa_base, oa_size);
}
return 0;
}
/**
@ -296,14 +348,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
*/
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
{
struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
mgr_id->gds_base = 0;
mgr_id->gds_size = 0;
mgr_id->gws_base = 0;
mgr_id->gws_size = 0;
mgr_id->oa_base = 0;
mgr_id->oa_size = 0;
id->gds_base = 0;
id->gds_size = 0;
id->gws_base = 0;
id->gws_size = 0;
id->oa_base = 0;
id->oa_size = 0;
}
/**
@ -335,8 +387,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* amdgpu_vm_update_pages - helper to call the right asic function
*
* @adev: amdgpu_device pointer
* @gtt: GART instance to use for mapping
* @gtt_flags: GTT hw access flags
* @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
@ -348,8 +400,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* to setup the page table using the DMA.
*/
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
struct amdgpu_gart *gtt,
uint32_t gtt_flags,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_ib *ib,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
@ -357,12 +409,11 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
{
trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
if ((gtt == &adev->gart) && (flags == gtt_flags)) {
uint64_t src = gtt->table_addr + (addr >> 12) * 8;
if (src) {
src += (addr >> 12) * 8;
amdgpu_vm_copy_pte(adev, ib, pe, src, count);
} else if (gtt) {
dma_addr_t *pages_addr = gtt->pages_addr;
} else if (pages_addr) {
amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
count, incr, flags);
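
amdgpu_vm_update_pages() now picks one of three update paths from its new parameters rather than by comparing a GART pointer: a GPU-side copy when src points into the GART table, scattered writes when per-page DMA addresses are given, and a linear set otherwise. A toy dispatcher showing the same precedence, with printfs standing in for the copy_pte/write_pte/set_pte_pde calls above:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;

/* Same precedence as amdgpu_vm_update_pages(): src, then pages_addr,
 * then a direct linear mapping. */
static void update_pages(uint64_t src, const dma_addr_t *pages_addr,
			 uint64_t pe, uint64_t addr, unsigned count)
{
	if (src) {
		src += (addr >> 12) * 8;	/* entry offset inside the GART table */
		printf("copy %u PTEs from GART table @ 0x%llx\n",
		       count, (unsigned long long)src);
	} else if (pages_addr) {
		printf("write %u PTEs from DMA address array\n", count);
	} else {
		printf("set %u linear PTEs at pe 0x%llx\n",
		       count, (unsigned long long)pe);
	}
}

int main(void)
{
	dma_addr_t pages[4] = { 0 };

	update_pages(0x100000, NULL, 0, 0x2000, 4);	/* GTT, matching flags */
	update_pages(0, pages, 0, 0x2000, 4);		/* GTT, split flags */
	update_pages(0, NULL, 0x8000, 0x2000, 4);	/* VRAM */
	return 0;
}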
@ -412,7 +463,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error;
amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
amdgpu_vm_update_pages(adev, 0, NULL, &job->ibs[0], addr, 0, entries,
0, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
@ -522,7 +573,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
((last_pt + incr * count) != pt)) {
if (count) {
amdgpu_vm_update_pages(adev, NULL, 0, ib,
amdgpu_vm_update_pages(adev, 0, NULL, ib,
last_pde, last_pt,
count, incr,
AMDGPU_PTE_VALID);
@ -537,7 +588,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
if (count)
amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
amdgpu_vm_update_pages(adev, 0, NULL, ib, last_pde, last_pt,
count, incr, AMDGPU_PTE_VALID);
if (ib->length_dw != 0) {
@ -570,8 +621,8 @@ error_free:
* amdgpu_vm_frag_ptes - add fragment information to PTEs
*
* @adev: amdgpu_device pointer
* @gtt: GART instance to use for mapping
* @gtt_flags: GTT hw mapping flags
* @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @ib: IB for the update
* @pe_start: first PTE to handle
* @pe_end: last PTE to handle
@ -579,8 +630,8 @@ error_free:
* @flags: hw mapping flags
*/
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
struct amdgpu_gart *gtt,
uint32_t gtt_flags,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_ib *ib,
uint64_t pe_start, uint64_t pe_end,
uint64_t addr, uint32_t flags)
@ -618,10 +669,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
return;
/* system pages are not physically contiguous */
if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
if (src || pages_addr || !(flags & AMDGPU_PTE_VALID) ||
(frag_start >= frag_end)) {
count = (pe_end - pe_start) / 8;
amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
amdgpu_vm_update_pages(adev, src, pages_addr, ib, pe_start,
addr, count, AMDGPU_GPU_PAGE_SIZE,
flags);
return;
@ -630,21 +682,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
/* handle the 4K area at the beginning */
if (pe_start != frag_start) {
count = (frag_start - pe_start) / 8;
amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
amdgpu_vm_update_pages(adev, 0, NULL, ib, pe_start, addr,
count, AMDGPU_GPU_PAGE_SIZE, flags);
addr += AMDGPU_GPU_PAGE_SIZE * count;
}
/* handle the area in the middle */
count = (frag_end - frag_start) / 8;
amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_start, addr, count,
AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
/* handle the 4K area at the end */
if (frag_end != pe_end) {
addr += AMDGPU_GPU_PAGE_SIZE * count;
count = (pe_end - frag_end) / 8;
amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_end, addr,
count, AMDGPU_GPU_PAGE_SIZE, flags);
}
}
@ -653,8 +705,8 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
* @adev: amdgpu_device pointer
* @gtt: GART instance to use for mapping
* @gtt_flags: GTT hw mapping flags
* @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
@ -664,8 +716,8 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* Update the page tables in the range @start - @end.
*/
static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
struct amdgpu_gart *gtt,
uint32_t gtt_flags,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_ib *ib,
uint64_t start, uint64_t end,
@ -693,7 +745,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
if (last_pe_end != pe_start) {
amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
amdgpu_vm_frag_ptes(adev, src, pages_addr, ib,
last_pe_start, last_pe_end,
last_dst, flags);
@ -708,17 +760,16 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
dst += nptes * AMDGPU_GPU_PAGE_SIZE;
}
amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
last_pe_start, last_pe_end,
last_dst, flags);
amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, last_pe_start,
last_pe_end, last_dst, flags);
}
/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
* @gtt: GART instance to use for mapping
* @gtt_flags: flags as they are used for GTT
* @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @start: start of mapped range
* @last: last mapped entry
@ -730,8 +781,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct amdgpu_gart *gtt,
uint32_t gtt_flags,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint32_t flags, uint64_t addr,
@ -762,11 +813,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* padding, etc. */
ndw = 64;
if ((gtt == &adev->gart) && (flags == gtt_flags)) {
if (src) {
/* only copy commands needed */
ndw += ncmds * 7;
} else if (gtt) {
} else if (pages_addr) {
/* header for write data commands */
ndw += ncmds * 4;
@ -796,8 +847,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
addr, flags);
amdgpu_vm_update_ptes(adev, src, pages_addr, vm, ib, start,
last + 1, addr, flags);
amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > ndw);
@ -823,11 +874,12 @@ error_free:
* amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
*
* @adev: amdgpu_device pointer
* @gtt: GART instance to use for mapping
* @gtt_flags: flags as they are used for GTT
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
* @addr: addr to set the area to
* @gtt_flags: flags as they are used for GTT
* @flags: HW flags for the mapping
* @fence: optional resulting fence
*
* Split the mapping into smaller chunks so that each update fits
@ -835,16 +887,16 @@ error_free:
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct amdgpu_gart *gtt,
uint32_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
uint64_t addr, struct fence **fence)
uint32_t flags, uint64_t addr,
struct fence **fence)
{
const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
uint64_t start = mapping->it.start;
uint32_t flags = gtt_flags;
uint64_t src = 0, start = mapping->it.start;
int r;
/* normally, bo_va->flags contains only the READABLE and WRITEABLE bits; go here
@ -857,10 +909,15 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_update(mapping);
if (pages_addr) {
if (flags == gtt_flags)
src = adev->gart.table_addr + (addr >> 12) * 8;
addr = 0;
}
addr += mapping->offset;
if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
if (!pages_addr || src)
return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
start, mapping->it.last,
flags, addr, fence);
@ -868,7 +925,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
uint64_t last;
last = min((uint64_t)mapping->it.last, start + max_size - 1);
r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
start, last, flags, addr,
fence);
if (r)
@ -899,16 +956,20 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
{
struct amdgpu_vm *vm = bo_va->vm;
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_gart *gtt = NULL;
uint32_t flags;
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
uint64_t addr;
int r;
if (mem) {
struct ttm_dma_tt *ttm;
addr = (u64)mem->start << PAGE_SHIFT;
switch (mem->mem_type) {
case TTM_PL_TT:
gtt = &bo_va->bo->adev->gart;
ttm = container_of(bo_va->bo->tbo.ttm, struct
ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
break;
case TTM_PL_VRAM:
@ -923,6 +984,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->vm_status))
@ -930,7 +992,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_unlock(&vm->status_lock);
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm,
mapping, flags, addr,
&bo_va->last_pt_update);
if (r)
return r;
@ -976,8 +1039,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
0, NULL);
r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping,
0, 0, NULL);
kfree(mapping);
if (r)
return r;
@ -1320,10 +1383,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct amd_sched_rq *rq;
int i, r;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
vm->ids[i].mgr_id = NULL;
vm->ids[i].flushed_updates = NULL;
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
vm->ids[i] = NULL;
vm->va = RB_ROOT;
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
@ -1418,12 +1479,12 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
fence_put(vm->page_directory_fence);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_vm_id *id = &vm->ids[i];
struct amdgpu_vm_id *id = vm->ids[i];
if (id->mgr_id)
atomic_long_cmpxchg(&id->mgr_id->owner,
(long)id, 0);
fence_put(id->flushed_updates);
if (!id)
continue;
atomic_long_cmpxchg(&id->owner, (long)vm, 0);
}
}
@ -1443,6 +1504,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
/* skip over VMID 0, since it is the system VM */
for (i = 1; i < adev->vm_manager.num_ids; ++i) {
amdgpu_vm_reset_id(adev, i);
amdgpu_sync_create(&adev->vm_manager.ids[i].active);
list_add_tail(&adev->vm_manager.ids[i].list,
&adev->vm_manager.ids_lru);
}
@ -1461,6 +1523,11 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
unsigned i;
for (i = 0; i < AMDGPU_NUM_VM; ++i)
fence_put(adev->vm_manager.ids[i].active);
for (i = 0; i < AMDGPU_NUM_VM; ++i) {
struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
fence_put(id->flushed_updates);
}
}


@ -461,6 +461,7 @@ union set_pixel_clock {
PIXEL_CLOCK_PARAMETERS_V3 v3;
PIXEL_CLOCK_PARAMETERS_V5 v5;
PIXEL_CLOCK_PARAMETERS_V6 v6;
PIXEL_CLOCK_PARAMETERS_V7 v7;
};
/* on DCE5, make sure the voltage is high enough to support the
@ -510,6 +511,49 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
union set_dce_clock {
SET_DCE_CLOCK_PS_ALLOCATION_V1_1 v1_1;
SET_DCE_CLOCK_PS_ALLOCATION_V2_1 v2_1;
};
u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev,
u32 freq, u8 clk_type, u8 clk_src)
{
u8 frev, crev;
int index;
union set_dce_clock args;
u32 ret_freq = 0;
memset(&args, 0, sizeof(args));
index = GetIndexIntoMasterTable(COMMAND, SetDCEClock);
if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
&crev))
return 0;
switch (frev) {
case 2:
switch (crev) {
case 1:
args.v2_1.asParam.ulDCEClkFreq = cpu_to_le32(freq); /* 10kHz units */
args.v2_1.asParam.ucDCEClkType = clk_type;
args.v2_1.asParam.ucDCEClkSrc = clk_src;
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
ret_freq = le32_to_cpu(args.v2_1.asParam.ulDCEClkFreq) * 10;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return 0;
}
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return 0;
}
return ret_freq;
}
static bool is_pixel_clock_source_from_pll(u32 encoder_mode, int pll_id)
{
if (ENCODER_MODE_IS_DP(encoder_mode)) {
@ -652,6 +696,34 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v6.ucEncoderMode = encoder_mode;
args.v6.ucPpll = pll_id;
break;
case 7:
args.v7.ulPixelClock = cpu_to_le32(clock * 10); /* 100 hz units */
args.v7.ucMiscInfo = 0;
if ((encoder_mode == ATOM_ENCODER_MODE_DVI) &&
(clock > 165000))
args.v7.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
args.v7.ucCRTC = crtc_id;
if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
switch (bpc) {
case 8:
default:
args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
break;
case 10:
args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
break;
case 12:
args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
break;
case 16:
args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
break;
}
}
args.v7.ucTransmitterID = encoder_id;
args.v7.ucEncoderMode = encoder_mode;
args.v7.ucPpll = pll_id;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
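
For HDMI deep color the v7 table scales the TMDS clock by bpc: 10-bit needs 5/4 of the 8-bit pixel clock, 12-bit 3/2, and 16-bit 2/1. A quick arithmetic sketch of the resulting link clock, keeping the 10 kHz unit convention used by the neighboring table calls:

#include <stdio.h>

/* Effective TMDS clock for HDMI deep color, matching the
 * PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_* cases above. */
static unsigned deep_color_clock(unsigned pixel_clock_10khz, unsigned bpc)
{
	switch (bpc) {
	case 10: return pixel_clock_10khz * 5 / 4;
	case 12: return pixel_clock_10khz * 3 / 2;
	case 16: return pixel_clock_10khz * 2;
	case 8:
	default: return pixel_clock_10khz;	/* ratio disabled */
	}
}

int main(void)
{
	/* 148.5 MHz (1080p60) at 12 bpc -> 222.75 MHz on the link */
	printf("%u\n", deep_color_clock(14850, 12));
	return 0;
}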


@ -37,6 +37,8 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
struct drm_display_mode *mode);
void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
u32 dispclk);
u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev,
u32 freq, u8 clk_type, u8 clk_src);
void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
u32 crtc_id,
int pll_id,


@ -563,6 +563,7 @@ union dig_encoder_control {
DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
DIG_ENCODER_CONTROL_PARAMETERS_V5 v5;
};
void
@ -690,6 +691,47 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
else
args.v4.ucHPD_ID = hpd_id + 1;
break;
case 5:
switch (action) {
case ATOM_ENCODER_CMD_SETUP_PANEL_MODE:
args.v5.asDPPanelModeParam.ucAction = action;
args.v5.asDPPanelModeParam.ucPanelMode = panel_mode;
args.v5.asDPPanelModeParam.ucDigId = dig->dig_encoder;
break;
case ATOM_ENCODER_CMD_STREAM_SETUP:
args.v5.asStreamParam.ucAction = action;
args.v5.asStreamParam.ucDigId = dig->dig_encoder;
args.v5.asStreamParam.ucDigMode =
amdgpu_atombios_encoder_get_encoder_mode(encoder);
if (ENCODER_MODE_IS_DP(args.v5.asStreamParam.ucDigMode))
args.v5.asStreamParam.ucLaneNum = dp_lane_count;
else if (amdgpu_dig_monitor_is_duallink(encoder,
amdgpu_encoder->pixel_clock))
args.v5.asStreamParam.ucLaneNum = 8;
else
args.v5.asStreamParam.ucLaneNum = 4;
args.v5.asStreamParam.ulPixelClock =
cpu_to_le32(amdgpu_encoder->pixel_clock / 10);
args.v5.asStreamParam.ucBitPerColor =
amdgpu_atombios_encoder_get_bpc(encoder);
args.v5.asStreamParam.ucLinkRateIn270Mhz = dp_clock / 27000;
break;
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_START:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN4:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE:
case ATOM_ENCODER_CMD_DP_VIDEO_OFF:
case ATOM_ENCODER_CMD_DP_VIDEO_ON:
args.v5.asCmdParam.ucAction = action;
args.v5.asCmdParam.ucDigId = dig->dig_encoder;
break;
default:
DRM_ERROR("Unsupported action 0x%x\n", action);
break;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
@ -710,6 +752,7 @@ union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 v6;
};
void
@ -1066,6 +1109,54 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
args.v5.ucDigEncoderSel = 1 << dig_encoder;
args.v5.ucDPLaneSet = lane_set;
break;
case 6:
args.v6.ucAction = action;
if (is_dp)
args.v6.ulSymClock = cpu_to_le32(dp_clock / 10);
else
args.v6.ulSymClock = cpu_to_le32(amdgpu_encoder->pixel_clock / 10);
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYB;
else
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYA;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYD;
else
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYC;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYF;
else
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYE;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYG;
break;
}
if (is_dp)
args.v6.ucLaneNum = dp_lane_count;
else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v6.ucLaneNum = 8;
else
args.v6.ucLaneNum = 4;
args.v6.ucConnObjId = connector_object_id;
if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH)
args.v6.ucDPLaneSet = lane_set;
else
args.v6.ucDigMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
if (hpd_id == AMDGPU_HPD_NONE)
args.v6.ucHPDSel = 0;
else
args.v6.ucHPDSel = hpd_id + 1;
args.v6.ucDigEncoderSel = 1 << dig_encoder;
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;


@ -6309,215 +6309,6 @@ static int ci_dpm_wait_for_idle(void *handle)
return 0;
}
static void ci_dpm_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "CIK DPM registers\n");
dev_info(adev->dev, " BIOS_SCRATCH_4=0x%08X\n",
RREG32(mmBIOS_SCRATCH_4));
dev_info(adev->dev, " MC_ARB_DRAM_TIMING=0x%08X\n",
RREG32(mmMC_ARB_DRAM_TIMING));
dev_info(adev->dev, " MC_ARB_DRAM_TIMING2=0x%08X\n",
RREG32(mmMC_ARB_DRAM_TIMING2));
dev_info(adev->dev, " MC_ARB_BURST_TIME=0x%08X\n",
RREG32(mmMC_ARB_BURST_TIME));
dev_info(adev->dev, " MC_ARB_DRAM_TIMING_1=0x%08X\n",
RREG32(mmMC_ARB_DRAM_TIMING_1));
dev_info(adev->dev, " MC_ARB_DRAM_TIMING2_1=0x%08X\n",
RREG32(mmMC_ARB_DRAM_TIMING2_1));
dev_info(adev->dev, " MC_CG_CONFIG=0x%08X\n",
RREG32(mmMC_CG_CONFIG));
dev_info(adev->dev, " MC_ARB_CG=0x%08X\n",
RREG32(mmMC_ARB_CG));
dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n",
RREG32_DIDT(ixDIDT_SQ_CTRL0));
dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n",
RREG32_DIDT(ixDIDT_DB_CTRL0));
dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n",
RREG32_DIDT(ixDIDT_TD_CTRL0));
dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n",
RREG32_DIDT(ixDIDT_TCP_CTRL0));
dev_info(adev->dev, " CG_THERMAL_INT=0x%08X\n",
RREG32_SMC(ixCG_THERMAL_INT));
dev_info(adev->dev, " CG_THERMAL_CTRL=0x%08X\n",
RREG32_SMC(ixCG_THERMAL_CTRL));
dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n",
RREG32_SMC(ixGENERAL_PWRMGT));
dev_info(adev->dev, " MC_SEQ_CNTL_3=0x%08X\n",
RREG32(mmMC_SEQ_CNTL_3));
dev_info(adev->dev, " LCAC_MC0_CNTL=0x%08X\n",
RREG32_SMC(ixLCAC_MC0_CNTL));
dev_info(adev->dev, " LCAC_MC1_CNTL=0x%08X\n",
RREG32_SMC(ixLCAC_MC1_CNTL));
dev_info(adev->dev, " LCAC_CPL_CNTL=0x%08X\n",
RREG32_SMC(ixLCAC_CPL_CNTL));
dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n",
RREG32_SMC(ixSCLK_PWRMGT_CNTL));
dev_info(adev->dev, " BIF_LNCNT_RESET=0x%08X\n",
RREG32(mmBIF_LNCNT_RESET));
dev_info(adev->dev, " FIRMWARE_FLAGS=0x%08X\n",
RREG32_SMC(ixFIRMWARE_FLAGS));
dev_info(adev->dev, " CG_SPLL_FUNC_CNTL=0x%08X\n",
RREG32_SMC(ixCG_SPLL_FUNC_CNTL));
dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_2=0x%08X\n",
RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2));
dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_3=0x%08X\n",
RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3));
dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_4=0x%08X\n",
RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4));
dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM=0x%08X\n",
RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM));
dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM_2=0x%08X\n",
RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2));
dev_info(adev->dev, " DLL_CNTL=0x%08X\n",
RREG32(mmDLL_CNTL));
dev_info(adev->dev, " MCLK_PWRMGT_CNTL=0x%08X\n",
RREG32(mmMCLK_PWRMGT_CNTL));
dev_info(adev->dev, " MPLL_AD_FUNC_CNTL=0x%08X\n",
RREG32(mmMPLL_AD_FUNC_CNTL));
dev_info(adev->dev, " MPLL_DQ_FUNC_CNTL=0x%08X\n",
RREG32(mmMPLL_DQ_FUNC_CNTL));
dev_info(adev->dev, " MPLL_FUNC_CNTL=0x%08X\n",
RREG32(mmMPLL_FUNC_CNTL));
dev_info(adev->dev, " MPLL_FUNC_CNTL_1=0x%08X\n",
RREG32(mmMPLL_FUNC_CNTL_1));
dev_info(adev->dev, " MPLL_FUNC_CNTL_2=0x%08X\n",
RREG32(mmMPLL_FUNC_CNTL_2));
dev_info(adev->dev, " MPLL_SS1=0x%08X\n",
RREG32(mmMPLL_SS1));
dev_info(adev->dev, " MPLL_SS2=0x%08X\n",
RREG32(mmMPLL_SS2));
dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL=0x%08X\n",
RREG32_SMC(ixCG_DISPLAY_GAP_CNTL));
dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL2=0x%08X\n",
RREG32_SMC(ixCG_DISPLAY_GAP_CNTL2));
dev_info(adev->dev, " CG_STATIC_SCREEN_PARAMETER=0x%08X\n",
RREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_1=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_1));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_2=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_2));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_3=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_3));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_4=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_4));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_5=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_5));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_6=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_6));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_7=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_7));
dev_info(adev->dev, " RCU_UC_EVENTS=0x%08X\n",
RREG32_SMC(ixRCU_UC_EVENTS));
dev_info(adev->dev, " DPM_TABLE_475=0x%08X\n",
RREG32_SMC(ixDPM_TABLE_475));
dev_info(adev->dev, " MC_SEQ_RAS_TIMING_LP=0x%08X\n",
RREG32(mmMC_SEQ_RAS_TIMING_LP));
dev_info(adev->dev, " MC_SEQ_RAS_TIMING=0x%08X\n",
RREG32(mmMC_SEQ_RAS_TIMING));
dev_info(adev->dev, " MC_SEQ_CAS_TIMING_LP=0x%08X\n",
RREG32(mmMC_SEQ_CAS_TIMING_LP));
dev_info(adev->dev, " MC_SEQ_CAS_TIMING=0x%08X\n",
RREG32(mmMC_SEQ_CAS_TIMING));
dev_info(adev->dev, " MC_SEQ_DLL_STBY_LP=0x%08X\n",
RREG32(mmMC_SEQ_DLL_STBY_LP));
dev_info(adev->dev, " MC_SEQ_DLL_STBY=0x%08X\n",
RREG32(mmMC_SEQ_DLL_STBY));
dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0_LP=0x%08X\n",
RREG32(mmMC_SEQ_G5PDX_CMD0_LP));
dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0=0x%08X\n",
RREG32(mmMC_SEQ_G5PDX_CMD0));
dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1_LP=0x%08X\n",
RREG32(mmMC_SEQ_G5PDX_CMD1_LP));
dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1=0x%08X\n",
RREG32(mmMC_SEQ_G5PDX_CMD1));
dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL_LP=0x%08X\n",
RREG32(mmMC_SEQ_G5PDX_CTRL_LP));
dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL=0x%08X\n",
RREG32(mmMC_SEQ_G5PDX_CTRL));
dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_DVS_CMD_LP));
dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD=0x%08X\n",
RREG32(mmMC_SEQ_PMG_DVS_CMD));
dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_DVS_CTL_LP));
dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL=0x%08X\n",
RREG32(mmMC_SEQ_PMG_DVS_CTL));
dev_info(adev->dev, " MC_SEQ_MISC_TIMING_LP=0x%08X\n",
RREG32(mmMC_SEQ_MISC_TIMING_LP));
dev_info(adev->dev, " MC_SEQ_MISC_TIMING=0x%08X\n",
RREG32(mmMC_SEQ_MISC_TIMING));
dev_info(adev->dev, " MC_SEQ_MISC_TIMING2_LP=0x%08X\n",
RREG32(mmMC_SEQ_MISC_TIMING2_LP));
dev_info(adev->dev, " MC_SEQ_MISC_TIMING2=0x%08X\n",
RREG32(mmMC_SEQ_MISC_TIMING2));
dev_info(adev->dev, " MC_SEQ_PMG_CMD_EMRS_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_CMD_EMRS_LP));
dev_info(adev->dev, " MC_PMG_CMD_EMRS=0x%08X\n",
RREG32(mmMC_PMG_CMD_EMRS));
dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_CMD_MRS_LP));
dev_info(adev->dev, " MC_PMG_CMD_MRS=0x%08X\n",
RREG32(mmMC_PMG_CMD_MRS));
dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS1_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_CMD_MRS1_LP));
dev_info(adev->dev, " MC_PMG_CMD_MRS1=0x%08X\n",
RREG32(mmMC_PMG_CMD_MRS1));
dev_info(adev->dev, " MC_SEQ_WR_CTL_D0_LP=0x%08X\n",
RREG32(mmMC_SEQ_WR_CTL_D0_LP));
dev_info(adev->dev, " MC_SEQ_WR_CTL_D0=0x%08X\n",
RREG32(mmMC_SEQ_WR_CTL_D0));
dev_info(adev->dev, " MC_SEQ_WR_CTL_D1_LP=0x%08X\n",
RREG32(mmMC_SEQ_WR_CTL_D1_LP));
dev_info(adev->dev, " MC_SEQ_WR_CTL_D1=0x%08X\n",
RREG32(mmMC_SEQ_WR_CTL_D1));
dev_info(adev->dev, " MC_SEQ_RD_CTL_D0_LP=0x%08X\n",
RREG32(mmMC_SEQ_RD_CTL_D0_LP));
dev_info(adev->dev, " MC_SEQ_RD_CTL_D0=0x%08X\n",
RREG32(mmMC_SEQ_RD_CTL_D0));
dev_info(adev->dev, " MC_SEQ_RD_CTL_D1_LP=0x%08X\n",
RREG32(mmMC_SEQ_RD_CTL_D1_LP));
dev_info(adev->dev, " MC_SEQ_RD_CTL_D1=0x%08X\n",
RREG32(mmMC_SEQ_RD_CTL_D1));
dev_info(adev->dev, " MC_SEQ_PMG_TIMING_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_TIMING_LP));
dev_info(adev->dev, " MC_SEQ_PMG_TIMING=0x%08X\n",
RREG32(mmMC_SEQ_PMG_TIMING));
dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS2_LP=0x%08X\n",
RREG32(mmMC_SEQ_PMG_CMD_MRS2_LP));
dev_info(adev->dev, " MC_PMG_CMD_MRS2=0x%08X\n",
RREG32(mmMC_PMG_CMD_MRS2));
dev_info(adev->dev, " MC_SEQ_WR_CTL_2_LP=0x%08X\n",
RREG32(mmMC_SEQ_WR_CTL_2_LP));
dev_info(adev->dev, " MC_SEQ_WR_CTL_2=0x%08X\n",
RREG32(mmMC_SEQ_WR_CTL_2));
dev_info(adev->dev, " PCIE_LC_SPEED_CNTL=0x%08X\n",
RREG32_PCIE(ixPCIE_LC_SPEED_CNTL));
dev_info(adev->dev, " PCIE_LC_LINK_WIDTH_CNTL=0x%08X\n",
RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL));
dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n",
RREG32(mmSMC_IND_INDEX_0));
dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n",
RREG32(mmSMC_IND_DATA_0));
dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n",
RREG32(mmSMC_IND_ACCESS_CNTL));
dev_info(adev->dev, " SMC_RESP_0=0x%08X\n",
RREG32(mmSMC_RESP_0));
dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n",
RREG32(mmSMC_MESSAGE_0));
dev_info(adev->dev, " SMC_SYSCON_RESET_CNTL=0x%08X\n",
RREG32_SMC(ixSMC_SYSCON_RESET_CNTL));
dev_info(adev->dev, " SMC_SYSCON_CLOCK_CNTL_0=0x%08X\n",
RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0));
dev_info(adev->dev, " SMC_SYSCON_MISC_CNTL=0x%08X\n",
RREG32_SMC(ixSMC_SYSCON_MISC_CNTL));
dev_info(adev->dev, " SMC_PC_C=0x%08X\n",
RREG32_SMC(ixSMC_PC_C));
}
static int ci_dpm_soft_reset(void *handle)
{
return 0;
@ -6625,7 +6416,6 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
.is_idle = ci_dpm_is_idle,
.wait_for_idle = ci_dpm_wait_for_idle,
.soft_reset = ci_dpm_soft_reset,
.print_status = ci_dpm_print_status,
.set_clockgating_state = ci_dpm_set_clockgating_state,
.set_powergating_state = ci_dpm_set_powergating_state,
};


@ -962,7 +962,7 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS, false},
{mmGB_ADDR_CONFIG, false},
{mmMC_ARB_RAMCFG, false},
@ -2214,11 +2214,6 @@ static int cik_common_wait_for_idle(void *handle)
return 0;
}
static void cik_common_print_status(void *handle)
{
}
static int cik_common_soft_reset(void *handle)
{
/* XXX hard reset?? */
@ -2249,7 +2244,6 @@ const struct amd_ip_funcs cik_common_ip_funcs = {
.is_idle = cik_common_is_idle,
.wait_for_idle = cik_common_wait_for_idle,
.soft_reset = cik_common_soft_reset,
.print_status = cik_common_print_status,
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
};


@ -372,35 +372,6 @@ static int cik_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static void cik_ih_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "CIK IH registers\n");
dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(mmSRBM_STATUS));
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n",
RREG32(mmINTERRUPT_CNTL));
dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n",
RREG32(mmINTERRUPT_CNTL2));
dev_info(adev->dev, " IH_CNTL=0x%08X\n",
RREG32(mmIH_CNTL));
dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n",
RREG32(mmIH_RB_CNTL));
dev_info(adev->dev, " IH_RB_BASE=0x%08X\n",
RREG32(mmIH_RB_BASE));
dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n",
RREG32(mmIH_RB_WPTR_ADDR_LO));
dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n",
RREG32(mmIH_RB_WPTR_ADDR_HI));
dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n",
RREG32(mmIH_RB_RPTR));
dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n",
RREG32(mmIH_RB_WPTR));
}
static int cik_ih_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -412,8 +383,6 @@ static int cik_ih_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
if (srbm_soft_reset) {
cik_ih_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -428,8 +397,6 @@ static int cik_ih_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
cik_ih_print_status((void *)adev);
}
return 0;
@ -459,7 +426,6 @@ const struct amd_ip_funcs cik_ih_ip_funcs = {
.is_idle = cik_ih_is_idle,
.wait_for_idle = cik_ih_wait_for_idle,
.soft_reset = cik_ih_soft_reset,
.print_status = cik_ih_print_status,
.set_clockgating_state = cik_ih_set_clockgating_state,
.set_powergating_state = cik_ih_set_powergating_state,
};


@ -976,7 +976,7 @@ static int cik_sdma_sw_init(void *handle)
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 256 * 1024,
r = amdgpu_ring_init(adev, ring, 1024,
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
@ -1064,57 +1064,6 @@ static int cik_sdma_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static void cik_sdma_print_status(void *handle)
{
int i, j;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "CIK SDMA registers\n");
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
for (i = 0; i < adev->sdma.num_instances; i++) {
dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_ME_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n",
i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
mutex_lock(&adev->srbm_mutex);
for (j = 0; j < 16; j++) {
cik_srbm_select(adev, 0, 0, 0, j);
dev_info(adev->dev, " VM %d:\n", j);
dev_info(adev->dev, " SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n",
RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
dev_info(adev->dev, " SDMA0_GFX_APE1_CNTL=0x%08X\n",
RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
}
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
}
static int cik_sdma_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@ -1137,8 +1086,6 @@ static int cik_sdma_soft_reset(void *handle)
}
if (srbm_soft_reset) {
cik_sdma_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -1153,8 +1100,6 @@ static int cik_sdma_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
cik_sdma_print_status((void *)adev);
}
return 0;
@ -1289,7 +1234,6 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
.is_idle = cik_sdma_is_idle,
.wait_for_idle = cik_sdma_wait_for_idle,
.soft_reset = cik_sdma_soft_reset,
.print_status = cik_sdma_print_status,
.set_clockgating_state = cik_sdma_set_clockgating_state,
.set_powergating_state = cik_sdma_set_powergating_state,
};

View File

@ -2241,7 +2241,6 @@ const struct amd_ip_funcs cz_dpm_ip_funcs = {
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
.print_status = NULL,
.set_clockgating_state = cz_dpm_set_clockgating_state,
.set_powergating_state = cz_dpm_set_powergating_state,
};

View File

@ -351,35 +351,6 @@ static int cz_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static void cz_ih_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "CZ IH registers\n");
dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(mmSRBM_STATUS));
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n",
RREG32(mmINTERRUPT_CNTL));
dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n",
RREG32(mmINTERRUPT_CNTL2));
dev_info(adev->dev, " IH_CNTL=0x%08X\n",
RREG32(mmIH_CNTL));
dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n",
RREG32(mmIH_RB_CNTL));
dev_info(adev->dev, " IH_RB_BASE=0x%08X\n",
RREG32(mmIH_RB_BASE));
dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n",
RREG32(mmIH_RB_WPTR_ADDR_LO));
dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n",
RREG32(mmIH_RB_WPTR_ADDR_HI));
dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n",
RREG32(mmIH_RB_RPTR));
dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n",
RREG32(mmIH_RB_WPTR));
}
static int cz_ih_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@ -391,8 +362,6 @@ static int cz_ih_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
cz_ih_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -407,8 +376,6 @@ static int cz_ih_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
cz_ih_print_status((void *)adev);
}
return 0;
@ -440,7 +407,6 @@ const struct amd_ip_funcs cz_ih_ip_funcs = {
.is_idle = cz_ih_is_idle,
.wait_for_idle = cz_ih_wait_for_idle,
.soft_reset = cz_ih_soft_reset,
.print_status = cz_ih_print_status,
.set_clockgating_state = cz_ih_set_clockgating_state,
.set_powergating_state = cz_ih_set_powergating_state,
};

View File

@ -3130,14 +3130,6 @@ static int dce_v10_0_wait_for_idle(void *handle)
return 0;
}
static void dce_v10_0_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "DCE 10.x registers\n");
/* XXX todo */
}
static int dce_v10_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0, tmp;
@ -3147,8 +3139,6 @@ static int dce_v10_0_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
if (srbm_soft_reset) {
dce_v10_0_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -3163,7 +3153,6 @@ static int dce_v10_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
dce_v10_0_print_status((void *)adev);
}
return 0;
}
@ -3512,7 +3501,6 @@ const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.is_idle = dce_v10_0_is_idle,
.wait_for_idle = dce_v10_0_wait_for_idle,
.soft_reset = dce_v10_0_soft_reset,
.print_status = dce_v10_0_print_status,
.set_clockgating_state = dce_v10_0_set_clockgating_state,
.set_powergating_state = dce_v10_0_set_powergating_state,
};

View File

@ -132,6 +132,22 @@ static const u32 stoney_golden_settings_a11[] =
mmFBC_MISC, 0x1f311fff, 0x14302000,
};
static const u32 polaris11_golden_settings_a11[] =
{
mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
mmFBC_DEBUG1, 0xffffffff, 0x00000008,
mmFBC_MISC, 0x9f313fff, 0x14300008,
mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};
static const u32 polaris10_golden_settings_a11[] =
{
mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
mmFBC_MISC, 0x9f313fff, 0x14300008,
mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};
static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
@ -149,6 +165,16 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
stoney_golden_settings_a11,
(const u32)ARRAY_SIZE(stoney_golden_settings_a11));
break;
case CHIP_POLARIS11:
amdgpu_program_register_sequence(adev,
polaris11_golden_settings_a11,
(const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
break;
case CHIP_POLARIS10:
amdgpu_program_register_sequence(adev,
polaris10_golden_settings_a11,
(const u32)ARRAY_SIZE(polaris10_golden_settings_a11));
break;
default:
break;
}
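
Each golden-settings table above is a flat array of (offset, and_mask, or_data) triples. A hedged sketch of how amdgpu_program_register_sequence() consumes them, modeled on the helper's usual read-modify-write behavior (the function body itself is not part of this diff):

static void program_register_sequence(struct amdgpu_device *adev,
				      const u32 *regs, u32 count)
{
	u32 i, tmp;

	for (i = 0; i < count; i += 3) {
		u32 reg = regs[i + 0];
		u32 and_mask = regs[i + 1];
		u32 or_data = regs[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_data;		/* full overwrite */
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;	/* clear the masked bits */
			tmp |= or_data;		/* apply the golden value */
		}
		WREG32(reg, tmp);
	}
}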
@ -565,35 +591,14 @@ static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
CRTC_CONTROL, CRTC_MASTER_EN);
if (crtc_enabled) {
#if 0
u32 frame_count;
int j;
#if 1
save->crtc_enabled[i] = true;
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
amdgpu_display_vblank_wait(adev, i);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
/* it is correct only for RGB; black is 0 */
WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = amdgpu_display_vblank_get_counter(adev, i);
for (j = 0; j < adev->usec_timeout; j++) {
if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
break;
udelay(1);
}
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
#else
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
@ -614,54 +619,20 @@ static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
u32 tmp, frame_count;
int i, j;
u32 tmp;
int i;
/* update crtc base addresses */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
if (save->crtc_enabled[i]) {
tmp = RREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
WREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
for (j = 0; j < adev->usec_timeout; j++) {
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
break;
udelay(1);
}
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
/* wait for the next frame */
frame_count = amdgpu_display_vblank_get_counter(adev, i);
for (j = 0; j < adev->usec_timeout; j++) {
if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
break;
udelay(1);
}
}
}
@ -1635,7 +1606,20 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
adev->mode_info.audio.enabled = true;
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
adev->mode_info.audio.num_pins = 7;
break;
case CHIP_POLARIS10:
adev->mode_info.audio.num_pins = 8;
break;
case CHIP_POLARIS11:
adev->mode_info.audio.num_pins = 6;
break;
default:
return -EINVAL;
}
for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
adev->mode_info.audio.pin[i].channels = -1;
@ -2427,6 +2411,44 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
u32 pll_in_use;
int pll;
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
return ATOM_DP_DTO;
/* use the same PPLL for all monitors with the same clock */
pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
return ATOM_COMBOPHY_PLL1;
else
return ATOM_COMBOPHY_PLL0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
return ATOM_COMBOPHY_PLL3;
else
return ATOM_COMBOPHY_PLL2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
return ATOM_COMBOPHY_PLL5;
else
return ATOM_COMBOPHY_PLL4;
break;
default:
DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
return ATOM_PPLL_INVALID;
}
}
if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
if (adev->clock.dp_extclk)
/* skip PPLL programming if using ext clock */
@ -2784,6 +2806,16 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
case ATOM_COMBOPHY_PLL0:
case ATOM_COMBOPHY_PLL1:
case ATOM_COMBOPHY_PLL2:
case ATOM_COMBOPHY_PLL3:
case ATOM_COMBOPHY_PLL4:
case ATOM_COMBOPHY_PLL5:
/* disable the ppll */
amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
default:
break;
}
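
The two hunks above wire up Polaris display PLL selection: DP streams use the DP DTO, non-DP monitors running at the same clock share a PPLL, and each UNIPHY instance maps its A/B links onto a fixed COMBOPHY PLL pair that crtc_disable now also knows how to tear down. The mapping from the pick_pll switch, refactored into a lookup purely for illustration:

static u32 polaris_combophy_pll(u32 encoder_id, bool linkb)
{
	switch (encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		return linkb ? ATOM_COMBOPHY_PLL1 : ATOM_COMBOPHY_PLL0;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		return linkb ? ATOM_COMBOPHY_PLL3 : ATOM_COMBOPHY_PLL2;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		return linkb ? ATOM_COMBOPHY_PLL5 : ATOM_COMBOPHY_PLL4;
	default:
		return ATOM_PPLL_INVALID;
	}
}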
@ -2800,11 +2832,28 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
if (!amdgpu_crtc->adjusted_clock)
return -EINVAL;
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
int encoder_mode =
amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
/* SetPixelClock calculates the plls and ss values now */
amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
amdgpu_crtc->pll_id,
encoder_mode, amdgpu_encoder->encoder_id,
adjusted_mode->clock, 0, 0, 0, 0,
amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
} else {
amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
}
amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
@ -2955,6 +3004,16 @@ static int dce_v11_0_early_init(void *handle)
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
break;
case CHIP_POLARIS10:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_POLARIS11:
adev->mode_info.num_crtc = 5;
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@ -3057,7 +3116,15 @@ static int dce_v11_0_hw_init(void *handle)
/* init dig PHYs, disp eng pll */
amdgpu_atombios_crtc_powergate_init(adev);
amdgpu_atombios_encoder_init_dig(adev);
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11)) {
amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
amdgpu_atombios_crtc_set_dce_clock(adev, 0,
DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
} else {
amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
}
/* initialize hpd */
dce_v11_0_hpd_init(adev);
@ -3126,14 +3193,6 @@ static int dce_v11_0_wait_for_idle(void *handle)
return 0;
}
static void dce_v11_0_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "DCE 10.x registers\n");
/* XXX todo */
}
static int dce_v11_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0, tmp;
@ -3143,8 +3202,6 @@ static int dce_v11_0_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
if (srbm_soft_reset) {
dce_v11_0_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -3159,7 +3216,6 @@ static int dce_v11_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
dce_v11_0_print_status((void *)adev);
}
return 0;
}
@ -3508,7 +3564,6 @@ const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.is_idle = dce_v11_0_is_idle,
.wait_for_idle = dce_v11_0_wait_for_idle,
.soft_reset = dce_v11_0_soft_reset,
.print_status = dce_v11_0_print_status,
.set_clockgating_state = dce_v11_0_set_clockgating_state,
.set_powergating_state = dce_v11_0_set_powergating_state,
};

View File

@ -3038,14 +3038,6 @@ static int dce_v8_0_wait_for_idle(void *handle)
return 0;
}
static void dce_v8_0_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "DCE 8.x registers\n");
/* XXX todo */
}
static int dce_v8_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0, tmp;
@ -3055,8 +3047,6 @@ static int dce_v8_0_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
if (srbm_soft_reset) {
dce_v8_0_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -3071,7 +3061,6 @@ static int dce_v8_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
dce_v8_0_print_status((void *)adev);
}
return 0;
}
@ -3442,7 +3431,6 @@ const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.is_idle = dce_v8_0_is_idle,
.wait_for_idle = dce_v8_0_wait_for_idle,
.soft_reset = dce_v8_0_soft_reset,
.print_status = dce_v8_0_print_status,
.set_clockgating_state = dce_v8_0_set_clockgating_state,
.set_powergating_state = dce_v8_0_set_powergating_state,
};

View File

@ -154,7 +154,6 @@ const struct amd_ip_funcs fiji_dpm_ip_funcs = {
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
.print_status = NULL,
.set_clockgating_state = fiji_dpm_set_clockgating_state,
.set_powergating_state = fiji_dpm_set_powergating_state,
};

View File

@ -4414,7 +4414,7 @@ static int gfx_v7_0_sw_init(void *handle)
ring = &adev->gfx.gfx_ring[i];
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024 * 1024,
r = amdgpu_ring_init(adev, ring, 1024,
PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
&adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
AMDGPU_RING_TYPE_GFX);
@ -4438,10 +4438,10 @@ static int gfx_v7_0_sw_init(void *handle)
ring->me = 1; /* first MEC */
ring->pipe = i / 8;
ring->queue = i % 8;
sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024 * 1024,
r = amdgpu_ring_init(adev, ring, 1024,
PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
&adev->gfx.eop_irq, irq_type,
AMDGPU_RING_TYPE_COMPUTE);
@ -4572,256 +4572,6 @@ static int gfx_v7_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static void gfx_v7_0_print_status(void *handle)
{
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "GFX 7.x registers\n");
dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(mmGRBM_STATUS));
dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
RREG32(mmGRBM_STATUS2));
dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
RREG32(mmGRBM_STATUS_SE0));
dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
RREG32(mmGRBM_STATUS_SE1));
dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
RREG32(mmGRBM_STATUS_SE2));
dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
RREG32(mmGRBM_STATUS_SE3));
dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
RREG32(mmCP_STALLED_STAT1));
dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
RREG32(mmCP_STALLED_STAT2));
dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
RREG32(mmCP_STALLED_STAT3));
dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
RREG32(mmCP_CPF_BUSY_STAT));
dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
RREG32(mmCP_CPF_STALLED_STAT1));
dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
RREG32(mmCP_CPC_STALLED_STAT1));
dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
for (i = 0; i < 32; i++) {
dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n",
i, RREG32(mmGB_TILE_MODE0 + (i * 4)));
}
for (i = 0; i < 16; i++) {
dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n",
i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4)));
}
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
dev_info(adev->dev, " se: %d\n", i);
gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n",
RREG32(mmPA_SC_RASTER_CONFIG));
dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n",
RREG32(mmPA_SC_RASTER_CONFIG_1));
}
gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n",
RREG32(mmGB_ADDR_CONFIG));
dev_info(adev->dev, " HDP_ADDR_CONFIG=0x%08X\n",
RREG32(mmHDP_ADDR_CONFIG));
dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n",
RREG32(mmDMIF_ADDR_CALC));
dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
RREG32(mmCP_MEQ_THRESHOLDS));
dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n",
RREG32(mmSX_DEBUG_1));
dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n",
RREG32(mmTA_CNTL_AUX));
dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n",
RREG32(mmSPI_CONFIG_CNTL));
dev_info(adev->dev, " SQ_CONFIG=0x%08X\n",
RREG32(mmSQ_CONFIG));
dev_info(adev->dev, " DB_DEBUG=0x%08X\n",
RREG32(mmDB_DEBUG));
dev_info(adev->dev, " DB_DEBUG2=0x%08X\n",
RREG32(mmDB_DEBUG2));
dev_info(adev->dev, " DB_DEBUG3=0x%08X\n",
RREG32(mmDB_DEBUG3));
dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n",
RREG32(mmCB_HW_CONTROL));
dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n",
RREG32(mmSPI_CONFIG_CNTL_1));
dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n",
RREG32(mmPA_SC_FIFO_SIZE));
dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n",
RREG32(mmVGT_NUM_INSTANCES));
dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n",
RREG32(mmCP_PERFMON_CNTL));
dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS));
dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n",
RREG32(mmVGT_CACHE_INVALIDATION));
dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n",
RREG32(mmVGT_GS_VERTEX_REUSE));
dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
RREG32(mmPA_SC_LINE_STIPPLE_STATE));
dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n",
RREG32(mmPA_CL_ENHANCE));
dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n",
RREG32(mmPA_SC_ENHANCE));
dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n",
RREG32(mmCP_ME_CNTL));
dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n",
RREG32(mmCP_MAX_CONTEXT));
dev_info(adev->dev, " CP_ENDIAN_SWAP=0x%08X\n",
RREG32(mmCP_ENDIAN_SWAP));
dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n",
RREG32(mmCP_DEVICE_ID));
dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n",
RREG32(mmCP_SEM_WAIT_TIMER));
if (adev->asic_type != CHIP_HAWAII)
dev_info(adev->dev, " CP_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
RREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL));
dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n",
RREG32(mmCP_RB_WPTR_DELAY));
dev_info(adev->dev, " CP_RB_VMID=0x%08X\n",
RREG32(mmCP_RB_VMID));
dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
RREG32(mmCP_RB0_CNTL));
dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n",
RREG32(mmCP_RB0_WPTR));
dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n",
RREG32(mmCP_RB0_RPTR_ADDR));
dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n",
RREG32(mmCP_RB0_RPTR_ADDR_HI));
dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
RREG32(mmCP_RB0_CNTL));
dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n",
RREG32(mmCP_RB0_BASE));
dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n",
RREG32(mmCP_RB0_BASE_HI));
dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n",
RREG32(mmCP_MEC_CNTL));
dev_info(adev->dev, " CP_CPF_DEBUG=0x%08X\n",
RREG32(mmCP_CPF_DEBUG));
dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n",
RREG32(mmSCRATCH_ADDR));
dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n",
RREG32(mmSCRATCH_UMSK));
/* init the pipes */
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
int me = (i < 4) ? 1 : 2;
int pipe = (i < 4) ? i : (i - 4);
int queue;
dev_info(adev->dev, " me: %d, pipe: %d\n", me, pipe);
cik_srbm_select(adev, me, pipe, 0, 0);
dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR=0x%08X\n",
RREG32(mmCP_HPD_EOP_BASE_ADDR));
dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR_HI=0x%08X\n",
RREG32(mmCP_HPD_EOP_BASE_ADDR_HI));
dev_info(adev->dev, " CP_HPD_EOP_VMID=0x%08X\n",
RREG32(mmCP_HPD_EOP_VMID));
dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
RREG32(mmCP_HPD_EOP_CONTROL));
for (queue = 0; queue < 8; queue++) {
cik_srbm_select(adev, me, pipe, queue, 0);
dev_info(adev->dev, " queue: %d\n", queue);
dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
RREG32(mmCP_PQ_WPTR_POLL_CNTL));
dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
dev_info(adev->dev, " CP_HQD_ACTIVE=0x%08X\n",
RREG32(mmCP_HQD_ACTIVE));
dev_info(adev->dev, " CP_HQD_DEQUEUE_REQUEST=0x%08X\n",
RREG32(mmCP_HQD_DEQUEUE_REQUEST));
dev_info(adev->dev, " CP_HQD_PQ_RPTR=0x%08X\n",
RREG32(mmCP_HQD_PQ_RPTR));
dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n",
RREG32(mmCP_HQD_PQ_WPTR));
dev_info(adev->dev, " CP_HQD_PQ_BASE=0x%08X\n",
RREG32(mmCP_HQD_PQ_BASE));
dev_info(adev->dev, " CP_HQD_PQ_BASE_HI=0x%08X\n",
RREG32(mmCP_HQD_PQ_BASE_HI));
dev_info(adev->dev, " CP_HQD_PQ_CONTROL=0x%08X\n",
RREG32(mmCP_HQD_PQ_CONTROL));
dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR=0x%08X\n",
RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR));
dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR_HI=0x%08X\n",
RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI));
dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR=0x%08X\n",
RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR));
dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR_HI=0x%08X\n",
RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI));
dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n",
RREG32(mmCP_HQD_PQ_WPTR));
dev_info(adev->dev, " CP_HQD_VMID=0x%08X\n",
RREG32(mmCP_HQD_VMID));
dev_info(adev->dev, " CP_MQD_BASE_ADDR=0x%08X\n",
RREG32(mmCP_MQD_BASE_ADDR));
dev_info(adev->dev, " CP_MQD_BASE_ADDR_HI=0x%08X\n",
RREG32(mmCP_MQD_BASE_ADDR_HI));
dev_info(adev->dev, " CP_MQD_CONTROL=0x%08X\n",
RREG32(mmCP_MQD_CONTROL));
}
}
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n",
RREG32(mmCP_INT_CNTL_RING0));
dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
RREG32(mmRLC_LB_CNTL));
dev_info(adev->dev, " RLC_CNTL=0x%08X\n",
RREG32(mmRLC_CNTL));
dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n",
RREG32(mmRLC_CGCG_CGLS_CTRL));
dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n",
RREG32(mmRLC_LB_CNTR_INIT));
dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n",
RREG32(mmRLC_LB_CNTR_MAX));
dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n",
RREG32(mmRLC_LB_INIT_CU_MASK));
dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n",
RREG32(mmRLC_LB_PARAMS));
dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
RREG32(mmRLC_LB_CNTL));
dev_info(adev->dev, " RLC_MC_CNTL=0x%08X\n",
RREG32(mmRLC_MC_CNTL));
dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n",
RREG32(mmRLC_UCODE_CNTL));
if (adev->asic_type == CHIP_BONAIRE)
dev_info(adev->dev, " RLC_DRIVER_CPDMA_STATUS=0x%08X\n",
RREG32(mmRLC_DRIVER_CPDMA_STATUS));
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < 16; i++) {
cik_srbm_select(adev, 0, 0, 0, i);
dev_info(adev->dev, " VM %d:\n", i);
dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n",
RREG32(mmSH_MEM_CONFIG));
dev_info(adev->dev, " SH_MEM_APE1_BASE=0x%08X\n",
RREG32(mmSH_MEM_APE1_BASE));
dev_info(adev->dev, " SH_MEM_APE1_LIMIT=0x%08X\n",
RREG32(mmSH_MEM_APE1_LIMIT));
dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n",
RREG32(mmSH_MEM_BASES));
}
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
static int gfx_v7_0_soft_reset(void *handle)
{
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@ -4855,7 +4605,6 @@ static int gfx_v7_0_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
if (grbm_soft_reset || srbm_soft_reset) {
gfx_v7_0_print_status((void *)adev);
/* disable CG/PG */
gfx_v7_0_fini_pg(adev);
gfx_v7_0_update_cg(adev, false);
@ -4898,7 +4647,6 @@ static int gfx_v7_0_soft_reset(void *handle)
}
/* Wait a little for things to settle down */
udelay(50);
gfx_v7_0_print_status((void *)adev);
}
return 0;
}
@ -5161,7 +4909,6 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.is_idle = gfx_v7_0_is_idle,
.wait_for_idle = gfx_v7_0_wait_for_idle,
.soft_reset = gfx_v7_0_soft_reset,
.print_status = gfx_v7_0_print_status,
.set_clockgating_state = gfx_v7_0_set_clockgating_state,
.set_powergating_state = gfx_v7_0_set_powergating_state,
};

File diff suppressed because it is too large

View File

@ -1114,114 +1114,6 @@ static int gmc_v7_0_wait_for_idle(void *handle)
}
static void gmc_v7_0_print_status(void *handle)
{
int i, j;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "GMC 8.x registers\n");
dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(mmSRBM_STATUS));
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
RREG32(mmMC_VM_MX_L1_TLB_CNTL));
dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n",
RREG32(mmVM_L2_CNTL));
dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n",
RREG32(mmVM_L2_CNTL2));
dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n",
RREG32(mmVM_L2_CNTL3));
dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n",
RREG32(mmVM_CONTEXT0_CNTL2));
dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n",
RREG32(mmVM_CONTEXT0_CNTL));
dev_info(adev->dev, " 0x15D4=0x%08X\n",
RREG32(0x575));
dev_info(adev->dev, " 0x15D8=0x%08X\n",
RREG32(0x576));
dev_info(adev->dev, " 0x15DC=0x%08X\n",
RREG32(0x577));
dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n",
RREG32(mmVM_CONTEXT1_CNTL2));
dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n",
RREG32(mmVM_CONTEXT1_CNTL));
for (i = 0; i < 16; i++) {
if (i < 8)
dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
else
dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
}
dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n",
RREG32(mmMC_VM_FB_LOCATION));
dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n",
RREG32(mmMC_VM_AGP_BASE));
dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n",
RREG32(mmMC_VM_AGP_TOP));
dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n",
RREG32(mmMC_VM_AGP_BOT));
if (adev->asic_type == CHIP_KAVERI) {
dev_info(adev->dev, " CHUB_CONTROL=0x%08X\n",
RREG32(mmCHUB_CONTROL));
}
dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n",
RREG32(mmHDP_NONSURFACE_BASE));
dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n",
RREG32(mmHDP_NONSURFACE_INFO));
dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n",
RREG32(mmHDP_NONSURFACE_SIZE));
dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n",
RREG32(mmHDP_MISC_CNTL));
dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n",
RREG32(mmHDP_HOST_PATH_CNTL));
for (i = 0, j = 0; i < 32; i++, j += 0x6) {
dev_info(adev->dev, " %d:\n", i);
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb05 + j, RREG32(0xb05 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb06 + j, RREG32(0xb06 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb07 + j, RREG32(0xb07 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb08 + j, RREG32(0xb08 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb09 + j, RREG32(0xb09 + j));
}
dev_info(adev->dev, " BIF_FB_EN=0x%08X\n",
RREG32(mmBIF_FB_EN));
}
static int gmc_v7_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -1241,8 +1133,6 @@ static int gmc_v7_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
gmc_v7_0_print_status((void *)adev);
gmc_v7_0_mc_stop(adev, &save);
if (gmc_v7_0_wait_for_idle(adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
@ -1266,8 +1156,6 @@ static int gmc_v7_0_soft_reset(void *handle)
gmc_v7_0_mc_resume(adev, &save);
udelay(50);
gmc_v7_0_print_status((void *)adev);
}
return 0;
@ -1381,7 +1269,6 @@ const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.is_idle = gmc_v7_0_is_idle,
.wait_for_idle = gmc_v7_0_wait_for_idle,
.soft_reset = gmc_v7_0_soft_reset,
.print_status = gmc_v7_0_print_status,
.set_clockgating_state = gmc_v7_0_set_clockgating_state,
.set_powergating_state = gmc_v7_0_set_powergating_state,
};

View File

@ -43,6 +43,8 @@ static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@ -73,6 +75,23 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
static const u32 golden_settings_polaris11_a11[] =
{
mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};
static const u32 golden_settings_polaris10_a11[] =
{
mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};
static const u32 cz_mgcg_cgcg_init[] =
{
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@ -103,6 +122,16 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_tonga_a11,
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
break;
case CHIP_POLARIS10:
amdgpu_program_register_sequence(adev,
golden_settings_polaris10_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
break;
case CHIP_CARRIZO:
amdgpu_program_register_sequence(adev,
cz_mgcg_cgcg_init,
@ -209,6 +238,12 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_TONGA:
chip_name = "tonga";
break;
case CHIP_POLARIS11:
chip_name = "polaris11";
break;
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
case CHIP_FIJI:
case CHIP_CARRIZO:
case CHIP_STONEY:
@ -1082,111 +1117,6 @@ static int gmc_v8_0_wait_for_idle(void *handle)
}
static void gmc_v8_0_print_status(void *handle)
{
int i, j;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "GMC 8.x registers\n");
dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(mmSRBM_STATUS));
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
RREG32(mmMC_VM_MX_L1_TLB_CNTL));
dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n",
RREG32(mmVM_L2_CNTL));
dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n",
RREG32(mmVM_L2_CNTL2));
dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n",
RREG32(mmVM_L2_CNTL3));
dev_info(adev->dev, " VM_L2_CNTL4=0x%08X\n",
RREG32(mmVM_L2_CNTL4));
dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n",
RREG32(mmVM_CONTEXT0_CNTL2));
dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n",
RREG32(mmVM_CONTEXT0_CNTL));
dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
dev_info(adev->dev, " mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n",
RREG32(mmVM_CONTEXT1_CNTL2));
dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n",
RREG32(mmVM_CONTEXT1_CNTL));
for (i = 0; i < 16; i++) {
if (i < 8)
dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
else
dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
}
dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n",
RREG32(mmMC_VM_FB_LOCATION));
dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n",
RREG32(mmMC_VM_AGP_BASE));
dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n",
RREG32(mmMC_VM_AGP_TOP));
dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n",
RREG32(mmMC_VM_AGP_BOT));
dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n",
RREG32(mmHDP_NONSURFACE_BASE));
dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n",
RREG32(mmHDP_NONSURFACE_INFO));
dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n",
RREG32(mmHDP_NONSURFACE_SIZE));
dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n",
RREG32(mmHDP_MISC_CNTL));
dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n",
RREG32(mmHDP_HOST_PATH_CNTL));
for (i = 0, j = 0; i < 32; i++, j += 0x6) {
dev_info(adev->dev, " %d:\n", i);
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb05 + j, RREG32(0xb05 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb06 + j, RREG32(0xb06 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb07 + j, RREG32(0xb07 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb08 + j, RREG32(0xb08 + j));
dev_info(adev->dev, " 0x%04X=0x%08X\n",
0xb09 + j, RREG32(0xb09 + j));
}
dev_info(adev->dev, " BIF_FB_EN=0x%08X\n",
RREG32(mmBIF_FB_EN));
}
static int gmc_v8_0_soft_reset(void *handle)
{
struct amdgpu_mode_mc_save save;
@ -1206,8 +1136,6 @@ static int gmc_v8_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
gmc_v8_0_print_status((void *)adev);
gmc_v8_0_mc_stop(adev, &save);
if (gmc_v8_0_wait_for_idle(adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
@ -1231,8 +1159,6 @@ static int gmc_v8_0_soft_reset(void *handle)
gmc_v8_0_mc_resume(adev, &save);
udelay(50);
gmc_v8_0_print_status((void *)adev);
}
return 0;
@ -1314,7 +1240,7 @@ static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
{
uint32_t data;
if (enable) {
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
data = RREG32(mmMC_HUB_MISC_HUB_CG);
data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
WREG32(mmMC_HUB_MISC_HUB_CG, data);
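
Enabling MC medium-grain clockgating here (and light sleep in the next hunk) is now additionally gated on the matching AMD_CG_SUPPORT_* capability bit, so a gate request becomes a no-op on boards whose init code never advertised the feature. A hedged sketch of where those bits usually come from; the exact Fiji flag set is illustrative, not quoted from this series:

static void fiji_set_cg_caps(struct amdgpu_device *adev)
{
	/* advertised once at early init, consulted on every CG request */
	adev->cg_flags = AMD_CG_SUPPORT_MC_MGCG |
			 AMD_CG_SUPPORT_MC_LS;
}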
@ -1394,7 +1320,7 @@ static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
{
uint32_t data;
if (enable) {
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
data = RREG32(mmMC_HUB_MISC_HUB_CG);
data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_HUB_MISC_HUB_CG, data);
@ -1505,7 +1431,6 @@ const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.is_idle = gmc_v8_0_is_idle,
.wait_for_idle = gmc_v8_0_wait_for_idle,
.soft_reset = gmc_v8_0_soft_reset,
.print_status = gmc_v8_0_print_status,
.set_clockgating_state = gmc_v8_0_set_clockgating_state,
.set_powergating_state = gmc_v8_0_set_powergating_state,
};

View File

@ -168,7 +168,6 @@ const struct amd_ip_funcs iceland_dpm_ip_funcs = {
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
.print_status = NULL,
.set_clockgating_state = iceland_dpm_set_clockgating_state,
.set_powergating_state = iceland_dpm_set_powergating_state,
};

View File

@ -351,35 +351,6 @@ static int iceland_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static void iceland_ih_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "ICELAND IH registers\n");
dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(mmSRBM_STATUS));
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n",
RREG32(mmINTERRUPT_CNTL));
dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n",
RREG32(mmINTERRUPT_CNTL2));
dev_info(adev->dev, " IH_CNTL=0x%08X\n",
RREG32(mmIH_CNTL));
dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n",
RREG32(mmIH_RB_CNTL));
dev_info(adev->dev, " IH_RB_BASE=0x%08X\n",
RREG32(mmIH_RB_BASE));
dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n",
RREG32(mmIH_RB_WPTR_ADDR_LO));
dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n",
RREG32(mmIH_RB_WPTR_ADDR_HI));
dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n",
RREG32(mmIH_RB_RPTR));
dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n",
RREG32(mmIH_RB_WPTR));
}
static int iceland_ih_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@ -391,8 +362,6 @@ static int iceland_ih_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
iceland_ih_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -407,8 +376,6 @@ static int iceland_ih_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
iceland_ih_print_status((void *)adev);
}
return 0;
@ -438,7 +405,6 @@ const struct amd_ip_funcs iceland_ih_ip_funcs = {
.is_idle = iceland_ih_is_idle,
.wait_for_idle = iceland_ih_wait_for_idle,
.soft_reset = iceland_ih_soft_reset,
.print_status = iceland_ih_print_status,
.set_clockgating_state = iceland_ih_set_clockgating_state,
.set_powergating_state = iceland_ih_set_powergating_state,
};

View File

@ -3147,62 +3147,6 @@ static int kv_dpm_wait_for_idle(void *handle)
return 0;
}
static void kv_dpm_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "KV/KB DPM registers\n");
dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n",
RREG32_DIDT(ixDIDT_SQ_CTRL0));
dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n",
RREG32_DIDT(ixDIDT_DB_CTRL0));
dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n",
RREG32_DIDT(ixDIDT_TD_CTRL0));
dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n",
RREG32_DIDT(ixDIDT_TCP_CTRL0));
dev_info(adev->dev, " LCAC_SX0_OVR_SEL=0x%08X\n",
RREG32_SMC(ixLCAC_SX0_OVR_SEL));
dev_info(adev->dev, " LCAC_SX0_OVR_VAL=0x%08X\n",
RREG32_SMC(ixLCAC_SX0_OVR_VAL));
dev_info(adev->dev, " LCAC_MC0_OVR_SEL=0x%08X\n",
RREG32_SMC(ixLCAC_MC0_OVR_SEL));
dev_info(adev->dev, " LCAC_MC0_OVR_VAL=0x%08X\n",
RREG32_SMC(ixLCAC_MC0_OVR_VAL));
dev_info(adev->dev, " LCAC_MC1_OVR_SEL=0x%08X\n",
RREG32_SMC(ixLCAC_MC1_OVR_SEL));
dev_info(adev->dev, " LCAC_MC1_OVR_VAL=0x%08X\n",
RREG32_SMC(ixLCAC_MC1_OVR_VAL));
dev_info(adev->dev, " LCAC_MC2_OVR_SEL=0x%08X\n",
RREG32_SMC(ixLCAC_MC2_OVR_SEL));
dev_info(adev->dev, " LCAC_MC2_OVR_VAL=0x%08X\n",
RREG32_SMC(ixLCAC_MC2_OVR_VAL));
dev_info(adev->dev, " LCAC_MC3_OVR_SEL=0x%08X\n",
RREG32_SMC(ixLCAC_MC3_OVR_SEL));
dev_info(adev->dev, " LCAC_MC3_OVR_VAL=0x%08X\n",
RREG32_SMC(ixLCAC_MC3_OVR_VAL));
dev_info(adev->dev, " LCAC_CPL_OVR_SEL=0x%08X\n",
RREG32_SMC(ixLCAC_CPL_OVR_SEL));
dev_info(adev->dev, " LCAC_CPL_OVR_VAL=0x%08X\n",
RREG32_SMC(ixLCAC_CPL_OVR_VAL));
dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n",
RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0));
dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n",
RREG32_SMC(ixGENERAL_PWRMGT));
dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n",
RREG32_SMC(ixSCLK_PWRMGT_CNTL));
dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n",
RREG32(mmSMC_MESSAGE_0));
dev_info(adev->dev, " SMC_RESP_0=0x%08X\n",
RREG32(mmSMC_RESP_0));
dev_info(adev->dev, " SMC_MSG_ARG_0=0x%08X\n",
RREG32(mmSMC_MSG_ARG_0));
dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n",
RREG32(mmSMC_IND_INDEX_0));
dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n",
RREG32(mmSMC_IND_DATA_0));
dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n",
RREG32(mmSMC_IND_ACCESS_CNTL));
}
static int kv_dpm_soft_reset(void *handle)
{
@ -3311,7 +3255,6 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
.is_idle = kv_dpm_is_idle,
.wait_for_idle = kv_dpm_wait_for_idle,
.soft_reset = kv_dpm_soft_reset,
.print_status = kv_dpm_print_status,
.set_clockgating_state = kv_dpm_set_clockgating_state,
.set_powergating_state = kv_dpm_set_powergating_state,
};

View File

@ -990,7 +990,7 @@ static int sdma_v2_4_sw_init(void *handle)
ring->ring_obj = NULL;
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 256 * 1024,
r = amdgpu_ring_init(adev, ring, 1024,
SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
@ -1080,55 +1080,6 @@ static int sdma_v2_4_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static void sdma_v2_4_print_status(void *handle)
{
int i, j;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "VI SDMA registers\n");
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
for (i = 0; i < adev->sdma.num_instances; i++) {
dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n",
i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
mutex_lock(&adev->srbm_mutex);
for (j = 0; j < 16; j++) {
vi_srbm_select(adev, 0, 0, 0, j);
dev_info(adev->dev, " VM %d:\n", j);
dev_info(adev->dev, " SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_APE1_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
}
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
}
static int sdma_v2_4_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@ -1151,8 +1102,6 @@ static int sdma_v2_4_soft_reset(void *handle)
}
if (srbm_soft_reset) {
sdma_v2_4_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -1167,8 +1116,6 @@ static int sdma_v2_4_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
sdma_v2_4_print_status((void *)adev);
}
return 0;
@ -1294,7 +1241,6 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.is_idle = sdma_v2_4_is_idle,
.wait_for_idle = sdma_v2_4_wait_for_idle,
.soft_reset = sdma_v2_4_soft_reset,
.print_status = sdma_v2_4_print_status,
.set_clockgating_state = sdma_v2_4_set_clockgating_state,
.set_powergating_state = sdma_v2_4_set_powergating_state,
};

View File

@ -56,6 +56,11 @@ MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
@ -101,6 +106,32 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
static const u32 golden_settings_polaris11_a11[] =
{
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};
static const u32 golden_settings_polaris10_a11[] =
{
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};
static const u32 cz_golden_settings_a11[] =
{
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
@ -172,6 +203,16 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_tonga_a11,
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
break;
case CHIP_POLARIS10:
amdgpu_program_register_sequence(adev,
golden_settings_polaris10_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
break;
case CHIP_CARRIZO:
amdgpu_program_register_sequence(adev,
cz_mgcg_cgcg_init,
@ -220,6 +261,12 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
case CHIP_FIJI:
chip_name = "fiji";
break;
case CHIP_POLARIS11:
chip_name = "polaris11";
break;
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
case CHIP_CARRIZO:
chip_name = "carrizo";
break;
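
The Polaris firmware names above follow the usual amdgpu pattern: the chip_name picked in this switch is spliced into "amdgpu/%s_sdma.bin" (and "_sdma1.bin" for the second engine) and handed to request_firmware(). A hedged sketch of that request loop, modeled on the surrounding init_microcode code that the diff truncates:

static int request_sdma_firmware(struct amdgpu_device *adev,
				 const char *chip_name)
{
	char fw_name[30];
	int err, i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name),
				 "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name),
				 "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw,
				       fw_name, adev->dev);
		if (err)
			return err;
	}
	return 0;
}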
@ -452,6 +499,31 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
unsigned init_cond_exec(struct amdgpu_ring *ring)
{
unsigned ret;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
amdgpu_ring_write(ring, 1);
ret = ring->wptr; /* this is the offset we need to patch later */
amdgpu_ring_write(ring, 0x55aa55aa); /* insert a dummy here and patch it later */
return ret;
}
void patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
unsigned cur;
BUG_ON(ring->ring[offset] != 0x55aa55aa);
cur = ring->wptr - 1;
if (likely(cur > offset))
ring->ring[offset] = cur - offset;
else
ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
}
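
init_cond_exec() emits a COND_EXE packet whose dword count is not yet known, parks the magic placeholder 0x55aa55aa in its place, and returns the ring offset of that slot; patch_cond_exec() later rewrites the slot with the distance to the current write pointer, wrapping around the ring (ring_size>>2 dwords) when the write pointer has already crossed the boundary. A hedged usage sketch; emit_something() stands in for whatever packets the caller wants the GPU to conditionally skip:

static void emit_conditional_block(struct amdgpu_ring *ring)
{
	/* placeholder count, remembered for the fixup below */
	unsigned patch_offset = init_cond_exec(ring);

	emit_something(ring);	/* packets guarded by cond_exe_gpu_addr */

	/* now that wptr is known, write the real dword count */
	patch_cond_exec(ring, patch_offset);
}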
/**
* sdma_v3_0_gfx_stop - stop the gfx async dma engines
*
@ -1151,7 +1223,7 @@ static int sdma_v3_0_sw_init(void *handle)
AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 256 * 1024,
r = amdgpu_ring_init(adev, ring, 1024,
SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
@ -1242,57 +1314,6 @@ static int sdma_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static void sdma_v3_0_print_status(void *handle)
{
int i, j;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "VI SDMA registers\n");
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
for (i = 0; i < adev->sdma.num_instances; i++) {
dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_DOORBELL=0x%08X\n",
i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n",
i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
mutex_lock(&adev->srbm_mutex);
for (j = 0; j < 16; j++) {
vi_srbm_select(adev, 0, 0, 0, j);
dev_info(adev->dev, " VM %d:\n", j);
dev_info(adev->dev, " SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_APE1_CNTL=0x%08X\n",
i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
}
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
}
static int sdma_v3_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@ -1315,8 +1336,6 @@ static int sdma_v3_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
sdma_v3_0_print_status((void *)adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -1331,8 +1350,6 @@ static int sdma_v3_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
sdma_v3_0_print_status((void *)adev);
}
return 0;
@ -1433,14 +1450,16 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
static void fiji_update_sdma_medium_grain_clock_gating(
static void sdma_v3_0_update_sdma_medium_grain_clock_gating(
struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
int i;
if (enable) {
temp = data = RREG32(mmSDMA0_CLK_CTRL);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
@ -1450,22 +1469,11 @@ static void fiji_update_sdma_medium_grain_clock_gating(
SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
if (data != temp)
WREG32(mmSDMA0_CLK_CTRL, data);
temp = data = RREG32(mmSDMA1_CLK_CTRL);
data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
if (data != temp)
WREG32(mmSDMA1_CLK_CTRL, data);
WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
}
} else {
temp = data = RREG32(mmSDMA0_CLK_CTRL);
for (i = 0; i < adev->sdma.num_instances; i++) {
temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
@ -1476,53 +1484,34 @@ static void fiji_update_sdma_medium_grain_clock_gating(
SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;
if (data != temp)
WREG32(mmSDMA0_CLK_CTRL, data);
temp = data = RREG32(mmSDMA1_CLK_CTRL);
data |= SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK;
if (data != temp)
WREG32(mmSDMA1_CLK_CTRL, data);
WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
}
}
}
static void fiji_update_sdma_medium_grain_light_sleep(
static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
int i;
if (enable) {
temp = data = RREG32(mmSDMA0_POWER_CNTL);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (temp != data)
WREG32(mmSDMA0_POWER_CNTL, data);
temp = data = RREG32(mmSDMA1_POWER_CNTL);
data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (temp != data)
WREG32(mmSDMA1_POWER_CNTL, data);
WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
}
} else {
temp = data = RREG32(mmSDMA0_POWER_CNTL);
for (i = 0; i < adev->sdma.num_instances; i++) {
temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (temp != data)
WREG32(mmSDMA0_POWER_CNTL, data);
temp = data = RREG32(mmSDMA1_POWER_CNTL);
data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (temp != data)
WREG32(mmSDMA1_POWER_CNTL, data);
WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
}
}
}
@ -1533,9 +1522,11 @@ static int sdma_v3_0_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_FIJI:
fiji_update_sdma_medium_grain_clock_gating(adev,
case CHIP_CARRIZO:
case CHIP_STONEY:
sdma_v3_0_update_sdma_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
fiji_update_sdma_medium_grain_light_sleep(adev,
sdma_v3_0_update_sdma_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
default:
@ -1562,7 +1553,6 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.is_idle = sdma_v3_0_is_idle,
.wait_for_idle = sdma_v3_0_wait_for_idle,
.soft_reset = sdma_v3_0_soft_reset,
.print_status = sdma_v3_0_print_status,
.set_clockgating_state = sdma_v3_0_set_clockgating_state,
.set_powergating_state = sdma_v3_0_set_powergating_state,
};

View File

@ -44,6 +44,7 @@
#define UCODE_ID_IH_REG_RESTORE 11
#define UCODE_ID_VBIOS 12
#define UCODE_ID_MISC_METADATA 13
#define UCODE_ID_SMU_SK 14
#define UCODE_ID_RLC_SCRATCH 32
#define UCODE_ID_RLC_SRM_ARAM 33
#define UCODE_ID_RLC_SRM_DRAM 34

View File

@ -154,7 +154,6 @@ const struct amd_ip_funcs tonga_dpm_ip_funcs = {
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
.print_status = NULL,
.set_clockgating_state = tonga_dpm_set_clockgating_state,
.set_powergating_state = tonga_dpm_set_powergating_state,
};

View File

@ -374,35 +374,6 @@ static int tonga_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static void tonga_ih_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "TONGA IH registers\n");
dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(mmSRBM_STATUS));
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2));
dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n",
RREG32(mmINTERRUPT_CNTL));
dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n",
RREG32(mmINTERRUPT_CNTL2));
dev_info(adev->dev, " IH_CNTL=0x%08X\n",
RREG32(mmIH_CNTL));
dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n",
RREG32(mmIH_RB_CNTL));
dev_info(adev->dev, " IH_RB_BASE=0x%08X\n",
RREG32(mmIH_RB_BASE));
dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n",
RREG32(mmIH_RB_WPTR_ADDR_LO));
dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n",
RREG32(mmIH_RB_WPTR_ADDR_HI));
dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n",
RREG32(mmIH_RB_RPTR));
dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n",
RREG32(mmIH_RB_WPTR));
}
static int tonga_ih_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@ -414,8 +385,6 @@ static int tonga_ih_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
tonga_ih_print_status(adev);
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@ -430,8 +399,6 @@ static int tonga_ih_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
tonga_ih_print_status(adev);
}
return 0;
@ -461,7 +428,6 @@ const struct amd_ip_funcs tonga_ih_ip_funcs = {
.is_idle = tonga_ih_is_idle,
.wait_for_idle = tonga_ih_wait_for_idle,
.soft_reset = tonga_ih_soft_reset,
.print_status = tonga_ih_print_status,
.set_clockgating_state = tonga_ih_set_clockgating_state,
.set_powergating_state = tonga_ih_set_powergating_state,
};

View File

@ -114,7 +114,7 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@ -559,12 +559,13 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
addr += size;
size = AMDGPU_UVD_STACK_SIZE >> 3;
size = AMDGPU_UVD_HEAP_SIZE >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
addr += size;
size = AMDGPU_UVD_HEAP_SIZE >> 3;
size = (AMDGPU_UVD_STACK_SIZE +
(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
@ -679,117 +680,6 @@ static int uvd_v4_2_soft_reset(void *handle)
return uvd_v4_2_start(adev);
}
static void uvd_v4_2_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "UVD 4.2 registers\n");
dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n",
RREG32(mmUVD_SEMA_ADDR_LOW));
dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n",
RREG32(mmUVD_SEMA_ADDR_HIGH));
dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n",
RREG32(mmUVD_SEMA_CMD));
dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n",
RREG32(mmUVD_GPCOM_VCPU_CMD));
dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n",
RREG32(mmUVD_GPCOM_VCPU_DATA0));
dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n",
RREG32(mmUVD_GPCOM_VCPU_DATA1));
dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n",
RREG32(mmUVD_ENGINE_CNTL));
dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_ADDR_CONFIG));
dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n",
RREG32(mmUVD_SEMA_CNTL));
dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n",
RREG32(mmUVD_LMI_EXT40_ADDR));
dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n",
RREG32(mmUVD_CTX_INDEX));
dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n",
RREG32(mmUVD_CTX_DATA));
dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n",
RREG32(mmUVD_CGC_GATE));
dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n",
RREG32(mmUVD_CGC_CTRL));
dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n",
RREG32(mmUVD_LMI_CTRL2));
dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n",
RREG32(mmUVD_MASTINT_EN));
dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n",
RREG32(mmUVD_LMI_ADDR_EXT));
dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n",
RREG32(mmUVD_LMI_CTRL));
dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n",
RREG32(mmUVD_LMI_SWAP_CNTL));
dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n",
RREG32(mmUVD_MP_SWAP_CNTL));
dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUXA0));
dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUXA1));
dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUXB0));
dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUXB1));
dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUX));
dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n",
RREG32(mmUVD_MPC_SET_ALU));
dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_OFFSET0));
dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_SIZE0));
dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_OFFSET1));
dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_SIZE1));
dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_OFFSET2));
dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_SIZE2));
dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n",
RREG32(mmUVD_VCPU_CNTL));
dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n",
RREG32(mmUVD_SOFT_RESET));
dev_info(adev->dev, " UVD_RBC_IB_BASE=0x%08X\n",
RREG32(mmUVD_RBC_IB_BASE));
dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n",
RREG32(mmUVD_RBC_IB_SIZE));
dev_info(adev->dev, " UVD_RBC_RB_BASE=0x%08X\n",
RREG32(mmUVD_RBC_RB_BASE));
dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n",
RREG32(mmUVD_RBC_RB_RPTR));
dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n",
RREG32(mmUVD_RBC_RB_WPTR));
dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
RREG32(mmUVD_RBC_RB_WPTR_CNTL));
dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n",
RREG32(mmUVD_RBC_RB_CNTL));
dev_info(adev->dev, " UVD_STATUS=0x%08X\n",
RREG32(mmUVD_STATUS));
dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n",
RREG32(mmUVD_CONTEXT_ID));
dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_ADDR_CONFIG));
dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
}
static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@ -860,7 +750,6 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.is_idle = uvd_v4_2_is_idle,
.wait_for_idle = uvd_v4_2_wait_for_idle,
.soft_reset = uvd_v4_2_soft_reset,
.print_status = uvd_v4_2_print_status,
.set_clockgating_state = uvd_v4_2_set_clockgating_state,
.set_powergating_state = uvd_v4_2_set_powergating_state,
};

View File

@ -31,6 +31,7 @@
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "vi.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@ -110,7 +111,7 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@ -271,12 +272,13 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
offset += size;
size = AMDGPU_UVD_STACK_SIZE;
size = AMDGPU_UVD_HEAP_SIZE;
WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
offset += size;
size = AMDGPU_UVD_HEAP_SIZE;
size = AMDGPU_UVD_STACK_SIZE +
(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
@ -622,120 +624,6 @@ static int uvd_v5_0_soft_reset(void *handle)
return uvd_v5_0_start(adev);
}
static void uvd_v5_0_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "UVD 5.0 registers\n");
dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n",
RREG32(mmUVD_SEMA_ADDR_LOW));
dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n",
RREG32(mmUVD_SEMA_ADDR_HIGH));
dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n",
RREG32(mmUVD_SEMA_CMD));
dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n",
RREG32(mmUVD_GPCOM_VCPU_CMD));
dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n",
RREG32(mmUVD_GPCOM_VCPU_DATA0));
dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n",
RREG32(mmUVD_GPCOM_VCPU_DATA1));
dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n",
RREG32(mmUVD_ENGINE_CNTL));
dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_ADDR_CONFIG));
dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n",
RREG32(mmUVD_SEMA_CNTL));
dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n",
RREG32(mmUVD_LMI_EXT40_ADDR));
dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n",
RREG32(mmUVD_CTX_INDEX));
dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n",
RREG32(mmUVD_CTX_DATA));
dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n",
RREG32(mmUVD_CGC_GATE));
dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n",
RREG32(mmUVD_CGC_CTRL));
dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n",
RREG32(mmUVD_LMI_CTRL2));
dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n",
RREG32(mmUVD_MASTINT_EN));
dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n",
RREG32(mmUVD_LMI_ADDR_EXT));
dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n",
RREG32(mmUVD_LMI_CTRL));
dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n",
RREG32(mmUVD_LMI_SWAP_CNTL));
dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n",
RREG32(mmUVD_MP_SWAP_CNTL));
dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUXA0));
dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUXA1));
dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUXB0));
dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUXB1));
dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUX));
dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n",
RREG32(mmUVD_MPC_SET_ALU));
dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_OFFSET0));
dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_SIZE0));
dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_OFFSET1));
dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_SIZE1));
dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_OFFSET2));
dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_SIZE2));
dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n",
RREG32(mmUVD_VCPU_CNTL));
dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n",
RREG32(mmUVD_SOFT_RESET));
dev_info(adev->dev, " UVD_LMI_RBC_IB_64BIT_BAR_LOW=0x%08X\n",
RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW));
dev_info(adev->dev, " UVD_LMI_RBC_IB_64BIT_BAR_HIGH=0x%08X\n",
RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH));
dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n",
RREG32(mmUVD_RBC_IB_SIZE));
dev_info(adev->dev, " UVD_LMI_RBC_RB_64BIT_BAR_LOW=0x%08X\n",
RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW));
dev_info(adev->dev, " UVD_LMI_RBC_RB_64BIT_BAR_HIGH=0x%08X\n",
RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH));
dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n",
RREG32(mmUVD_RBC_RB_RPTR));
dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n",
RREG32(mmUVD_RBC_RB_WPTR));
dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
RREG32(mmUVD_RBC_RB_WPTR_CNTL));
dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n",
RREG32(mmUVD_RBC_RB_CNTL));
dev_info(adev->dev, " UVD_STATUS=0x%08X\n",
RREG32(mmUVD_STATUS));
dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n",
RREG32(mmUVD_CONTEXT_ID));
dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_ADDR_CONFIG));
dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
}
static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@ -754,14 +642,128 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
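/* switch UVD to software-controlled dynamic clock gating: enable dynamic mode, clear the per-block HW gating modes, and gate the SUVD blocks */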
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
uint32_t data, data1, data2, suvd_flags;
data = RREG32(mmUVD_CGC_CTRL);
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK;
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
UVD_CGC_CTRL__SYS_MODE_MASK |
UVD_CGC_CTRL__UDEC_MODE_MASK |
UVD_CGC_CTRL__MPEG2_MODE_MASK |
UVD_CGC_CTRL__REGS_MODE_MASK |
UVD_CGC_CTRL__RBC_MODE_MASK |
UVD_CGC_CTRL__LMI_MC_MODE_MASK |
UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
UVD_CGC_CTRL__IDCT_MODE_MASK |
UVD_CGC_CTRL__MPRD_MODE_MASK |
UVD_CGC_CTRL__MPC_MODE_MASK |
UVD_CGC_CTRL__LBSI_MODE_MASK |
UVD_CGC_CTRL__LRBBM_MODE_MASK |
UVD_CGC_CTRL__WCB_MODE_MASK |
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__JPEG_MODE_MASK |
UVD_CGC_CTRL__SCPU_MODE_MASK);
data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
data1 |= suvd_flags;
WREG32(mmUVD_CGC_CTRL, data);
WREG32(mmUVD_CGC_GATE, 0);
WREG32(mmUVD_SUVD_CGC_GATE, data1);
WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
uint32_t data, data1, cgc_flags, suvd_flags;
data = RREG32(mmUVD_CGC_GATE);
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
cgc_flags = UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__VCPU_MASK |
UVD_CGC_GATE__SCPU_MASK;
suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK;
data |= cgc_flags;
data1 |= suvd_flags;
WREG32(mmUVD_CGC_GATE, data);
WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
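/* cache the last programmed state; a repeated request for the same state is a no-op */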
static int curstate = -1;
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
if (curstate == state)
return 0;
curstate = state;
if (enable) {
/* disable HW gating and enable SW gating */
uvd_v5_0_set_sw_clock_gating(adev);
} else {
/* wait for STATUS to clear */
if (uvd_v5_0_wait_for_idle(handle))
return -EBUSY;
/* enable HW gates because UVD is idle */
/* uvd_v5_0_set_hw_clock_gating(adev); */
}
return 0;
}
@ -800,7 +802,6 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.is_idle = uvd_v5_0_is_idle,
.wait_for_idle = uvd_v5_0_wait_for_idle,
.soft_reset = uvd_v5_0_soft_reset,
.print_status = uvd_v5_0_print_status,
.set_clockgating_state = uvd_v5_0_set_clockgating_state,
.set_powergating_state = uvd_v5_0_set_powergating_state,
};

View File

@ -31,11 +31,15 @@
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "vi.h"
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
/**
* uvd_v6_0_ring_get_rptr - get read pointer
@ -110,7 +114,7 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@ -270,20 +274,24 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
offset += size;
size = AMDGPU_UVD_STACK_SIZE;
size = AMDGPU_UVD_HEAP_SIZE;
WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
offset += size;
size = AMDGPU_UVD_HEAP_SIZE;
size = AMDGPU_UVD_STACK_SIZE +
(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}
#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
bool enable)
{
@ -360,157 +368,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
WREG32(mmUVD_CGC_GATE, data);
WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
static void tonga_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
bool enable)
{
u32 data, data1;
data = RREG32(mmUVD_CGC_GATE);
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
if (enable) {
data |= UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__VCPU_MASK |
UVD_CGC_GATE__SCPU_MASK;
data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK;
} else {
data &= ~(UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__LMI_UMC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__VCPU_MASK |
UVD_CGC_GATE__SCPU_MASK);
data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK);
}
WREG32(mmUVD_CGC_GATE, data);
WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
static void uvd_v6_0_set_uvd_dynamic_clock_mode(struct amdgpu_device *adev,
bool swmode)
{
u32 data, data1 = 0, data2;
/* Always un-gate UVD REGS bit */
data = RREG32(mmUVD_CGC_GATE);
data &= ~(UVD_CGC_GATE__REGS_MASK);
WREG32(mmUVD_CGC_GATE, data);
data = RREG32(mmUVD_CGC_CTRL);
data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER) |
4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY);
data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
if (swmode) {
data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
UVD_CGC_CTRL__SYS_MODE_MASK |
UVD_CGC_CTRL__UDEC_MODE_MASK |
UVD_CGC_CTRL__MPEG2_MODE_MASK |
UVD_CGC_CTRL__REGS_MODE_MASK |
UVD_CGC_CTRL__RBC_MODE_MASK |
UVD_CGC_CTRL__LMI_MC_MODE_MASK |
UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
UVD_CGC_CTRL__IDCT_MODE_MASK |
UVD_CGC_CTRL__MPRD_MODE_MASK |
UVD_CGC_CTRL__MPC_MODE_MASK |
UVD_CGC_CTRL__LBSI_MODE_MASK |
UVD_CGC_CTRL__LRBBM_MODE_MASK |
UVD_CGC_CTRL__WCB_MODE_MASK |
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__JPEG_MODE_MASK |
UVD_CGC_CTRL__SCPU_MODE_MASK);
data1 |= UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK;
data1 &= ~UVD_CGC_CTRL2__GATER_DIV_ID_MASK;
data1 |= 7 << REG_FIELD_SHIFT(UVD_CGC_CTRL2, GATER_DIV_ID);
data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
} else {
data |= UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
UVD_CGC_CTRL__SYS_MODE_MASK |
UVD_CGC_CTRL__UDEC_MODE_MASK |
UVD_CGC_CTRL__MPEG2_MODE_MASK |
UVD_CGC_CTRL__REGS_MODE_MASK |
UVD_CGC_CTRL__RBC_MODE_MASK |
UVD_CGC_CTRL__LMI_MC_MODE_MASK |
UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
UVD_CGC_CTRL__IDCT_MODE_MASK |
UVD_CGC_CTRL__MPRD_MODE_MASK |
UVD_CGC_CTRL__MPC_MODE_MASK |
UVD_CGC_CTRL__LBSI_MODE_MASK |
UVD_CGC_CTRL__LRBBM_MODE_MASK |
UVD_CGC_CTRL__WCB_MODE_MASK |
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__SCPU_MODE_MASK;
data2 |= UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK;
}
WREG32(mmUVD_CGC_CTRL, data);
WREG32(mmUVD_SUVD_CGC_CTRL, data2);
data = RREG32_UVD_CTX(ixUVD_CGC_CTRL2);
data &= ~(REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
data1 &= (REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
data |= data1;
WREG32_UVD_CTX(ixUVD_CGC_CTRL2, data);
}
#endif
/**
* uvd_v6_0_start - start UVD block
@ -538,11 +396,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
/* Set dynamic clock gating in S/W control mode */
if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
if (adev->flags & AMD_IS_APU)
cz_set_uvd_clock_gating_branches(adev, false);
else
tonga_set_uvd_clock_gating_branches(adev, false);
uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
uvd_v6_0_set_sw_clock_gating(adev);
} else {
/* disable clock gating */
uint32_t data = RREG32(mmUVD_CGC_CTRL);
@ -854,112 +708,6 @@ static int uvd_v6_0_soft_reset(void *handle)
return uvd_v6_0_start(adev);
}
static void uvd_v6_0_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "UVD 6.0 registers\n");
dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n",
RREG32(mmUVD_SEMA_ADDR_LOW));
dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n",
RREG32(mmUVD_SEMA_ADDR_HIGH));
dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n",
RREG32(mmUVD_SEMA_CMD));
dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n",
RREG32(mmUVD_GPCOM_VCPU_CMD));
dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n",
RREG32(mmUVD_GPCOM_VCPU_DATA0));
dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n",
RREG32(mmUVD_GPCOM_VCPU_DATA1));
dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n",
RREG32(mmUVD_ENGINE_CNTL));
dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_ADDR_CONFIG));
dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n",
RREG32(mmUVD_SEMA_CNTL));
dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n",
RREG32(mmUVD_LMI_EXT40_ADDR));
dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n",
RREG32(mmUVD_CTX_INDEX));
dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n",
RREG32(mmUVD_CTX_DATA));
dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n",
RREG32(mmUVD_CGC_GATE));
dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n",
RREG32(mmUVD_CGC_CTRL));
dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n",
RREG32(mmUVD_LMI_CTRL2));
dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n",
RREG32(mmUVD_MASTINT_EN));
dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n",
RREG32(mmUVD_LMI_ADDR_EXT));
dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n",
RREG32(mmUVD_LMI_CTRL));
dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n",
RREG32(mmUVD_LMI_SWAP_CNTL));
dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n",
RREG32(mmUVD_MP_SWAP_CNTL));
dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUXA0));
dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUXA1));
dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUXB0));
dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUXB1));
dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n",
RREG32(mmUVD_MPC_SET_MUX));
dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n",
RREG32(mmUVD_MPC_SET_ALU));
dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_OFFSET0));
dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_SIZE0));
dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_OFFSET1));
dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_SIZE1));
dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_OFFSET2));
dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n",
RREG32(mmUVD_VCPU_CACHE_SIZE2));
dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n",
RREG32(mmUVD_VCPU_CNTL));
dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n",
RREG32(mmUVD_SOFT_RESET));
dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n",
RREG32(mmUVD_RBC_IB_SIZE));
dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n",
RREG32(mmUVD_RBC_RB_RPTR));
dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n",
RREG32(mmUVD_RBC_RB_WPTR));
dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
RREG32(mmUVD_RBC_RB_WPTR_CNTL));
dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n",
RREG32(mmUVD_RBC_RB_CNTL));
dev_info(adev->dev, " UVD_STATUS=0x%08X\n",
RREG32(mmUVD_STATUS));
dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n",
RREG32(mmUVD_CONTEXT_ID));
dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_ADDR_CONFIG));
dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
}
static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@ -978,25 +726,146 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
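/* same SW-controlled dynamic gating setup as UVD 5.0, extended with the JPEG/JPEG2 blocks */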
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
uint32_t data, data1, data2, suvd_flags;
data = RREG32(mmUVD_CGC_CTRL);
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK;
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
UVD_CGC_CTRL__SYS_MODE_MASK |
UVD_CGC_CTRL__UDEC_MODE_MASK |
UVD_CGC_CTRL__MPEG2_MODE_MASK |
UVD_CGC_CTRL__REGS_MODE_MASK |
UVD_CGC_CTRL__RBC_MODE_MASK |
UVD_CGC_CTRL__LMI_MC_MODE_MASK |
UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
UVD_CGC_CTRL__IDCT_MODE_MASK |
UVD_CGC_CTRL__MPRD_MODE_MASK |
UVD_CGC_CTRL__MPC_MODE_MASK |
UVD_CGC_CTRL__LBSI_MODE_MASK |
UVD_CGC_CTRL__LRBBM_MODE_MASK |
UVD_CGC_CTRL__WCB_MODE_MASK |
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__JPEG_MODE_MASK |
UVD_CGC_CTRL__SCPU_MODE_MASK |
UVD_CGC_CTRL__JPEG2_MODE_MASK);
data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
data1 |= suvd_flags;
WREG32(mmUVD_CGC_CTRL, data);
WREG32(mmUVD_CGC_GATE, 0);
WREG32(mmUVD_SUVD_CGC_GATE, data1);
WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
uint32_t data, data1, cgc_flags, suvd_flags;
data = RREG32(mmUVD_CGC_GATE);
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
cgc_flags = UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__VCPU_MASK |
UVD_CGC_GATE__SCPU_MASK |
UVD_CGC_GATE__JPEG_MASK |
UVD_CGC_GATE__JPEG2_MASK;
suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK;
data |= cgc_flags;
data1 |= suvd_flags;
WREG32(mmUVD_CGC_GATE, data);
WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
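/* bypass the DCLK/VCLK DFS dividers; toggled on Fiji together with UVD clock gating */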
static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
if (enable)
tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
else
tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}
static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
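/* as in UVD 5.0, remember the last programmed state and skip redundant transitions */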
static int curstate = -1;
if (adev->asic_type == CHIP_FIJI)
uvd_v6_set_bypass_mode(adev, enable);
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
if (curstate == state)
return 0;
curstate = state;
if (enable) {
if (adev->flags & AMD_IS_APU)
cz_set_uvd_clock_gating_branches(adev, enable);
else
tonga_set_uvd_clock_gating_branches(adev, enable);
uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
/* disable HW gating and enable SW gating */
uvd_v6_0_set_sw_clock_gating(adev);
} else {
uint32_t data = RREG32(mmUVD_CGC_CTRL);
data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
WREG32(mmUVD_CGC_CTRL, data);
/* wait for STATUS to clear */
if (uvd_v6_0_wait_for_idle(handle))
return -EBUSY;
/* enable HW gates because UVD is idle */
/* uvd_v6_0_set_hw_clock_gating(adev); */
}
return 0;
@ -1037,7 +906,6 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.is_idle = uvd_v6_0_is_idle,
.wait_for_idle = uvd_v6_0_wait_for_idle,
.soft_reset = uvd_v6_0_soft_reset,
.print_status = uvd_v6_0_print_status,
.set_clockgating_state = uvd_v6_0_set_clockgating_state,
.set_powergating_state = uvd_v6_0_set_powergating_state,
};

View File

@ -44,7 +44,7 @@
static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v2_0_wait_for_idle(void *handle);
/**
* vce_v2_0_ring_get_rptr - get read pointer
*
@ -201,14 +201,14 @@ static int vce_v2_0_sw_init(void *handle)
ring = &adev->vce.ring[0];
sprintf(ring->name, "vce0");
r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
&adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
if (r)
return r;
ring = &adev->vce.ring[1];
sprintf(ring->name, "vce1");
r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
&adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
if (r)
return r;
@ -240,7 +240,8 @@ static int vce_v2_0_hw_init(void *handle)
r = vce_v2_0_start(adev);
if (r)
return r;
/* this error means the VCPU is not in a running state, so just skip the ring test rather than stopping driver initialization */
return 0;
ring = &adev->vce.ring[0];
ring->ready = true;
@ -339,6 +340,21 @@ static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
u32 orig, tmp;
if (gated) {
if (vce_v2_0_wait_for_idle(adev)) {
DRM_INFO("VCE is busy, Can't set clock gateing");
return;
}
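/* gate: stop the VCPU clock, hold ECPU in soft reset, and clear VCE_STATUS once idle */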
WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
mdelay(100);
WREG32(mmVCE_STATUS, 0);
} else {
WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
mdelay(100);
}
tmp = RREG32(mmVCE_CLOCK_GATING_B);
tmp &= ~0x00060006;
if (gated) {
@ -362,6 +378,7 @@ static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
if (gated)
WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
}
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
@ -478,75 +495,6 @@ static int vce_v2_0_soft_reset(void *handle)
return vce_v2_0_start(adev);
}
static void vce_v2_0_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "VCE 2.0 registers\n");
dev_info(adev->dev, " VCE_STATUS=0x%08X\n",
RREG32(mmVCE_STATUS));
dev_info(adev->dev, " VCE_VCPU_CNTL=0x%08X\n",
RREG32(mmVCE_VCPU_CNTL));
dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
RREG32(mmVCE_VCPU_CACHE_OFFSET0));
dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE0=0x%08X\n",
RREG32(mmVCE_VCPU_CACHE_SIZE0));
dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
RREG32(mmVCE_VCPU_CACHE_OFFSET1));
dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE1=0x%08X\n",
RREG32(mmVCE_VCPU_CACHE_SIZE1));
dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
RREG32(mmVCE_VCPU_CACHE_OFFSET2));
dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE2=0x%08X\n",
RREG32(mmVCE_VCPU_CACHE_SIZE2));
dev_info(adev->dev, " VCE_SOFT_RESET=0x%08X\n",
RREG32(mmVCE_SOFT_RESET));
dev_info(adev->dev, " VCE_RB_BASE_LO2=0x%08X\n",
RREG32(mmVCE_RB_BASE_LO2));
dev_info(adev->dev, " VCE_RB_BASE_HI2=0x%08X\n",
RREG32(mmVCE_RB_BASE_HI2));
dev_info(adev->dev, " VCE_RB_SIZE2=0x%08X\n",
RREG32(mmVCE_RB_SIZE2));
dev_info(adev->dev, " VCE_RB_RPTR2=0x%08X\n",
RREG32(mmVCE_RB_RPTR2));
dev_info(adev->dev, " VCE_RB_WPTR2=0x%08X\n",
RREG32(mmVCE_RB_WPTR2));
dev_info(adev->dev, " VCE_RB_BASE_LO=0x%08X\n",
RREG32(mmVCE_RB_BASE_LO));
dev_info(adev->dev, " VCE_RB_BASE_HI=0x%08X\n",
RREG32(mmVCE_RB_BASE_HI));
dev_info(adev->dev, " VCE_RB_SIZE=0x%08X\n",
RREG32(mmVCE_RB_SIZE));
dev_info(adev->dev, " VCE_RB_RPTR=0x%08X\n",
RREG32(mmVCE_RB_RPTR));
dev_info(adev->dev, " VCE_RB_WPTR=0x%08X\n",
RREG32(mmVCE_RB_WPTR));
dev_info(adev->dev, " VCE_CLOCK_GATING_A=0x%08X\n",
RREG32(mmVCE_CLOCK_GATING_A));
dev_info(adev->dev, " VCE_CLOCK_GATING_B=0x%08X\n",
RREG32(mmVCE_CLOCK_GATING_B));
dev_info(adev->dev, " VCE_CGTT_CLK_OVERRIDE=0x%08X\n",
RREG32(mmVCE_CGTT_CLK_OVERRIDE));
dev_info(adev->dev, " VCE_UENC_CLOCK_GATING=0x%08X\n",
RREG32(mmVCE_UENC_CLOCK_GATING));
dev_info(adev->dev, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
RREG32(mmVCE_UENC_REG_CLOCK_GATING));
dev_info(adev->dev, " VCE_SYS_INT_EN=0x%08X\n",
RREG32(mmVCE_SYS_INT_EN));
dev_info(adev->dev, " VCE_LMI_CTRL2=0x%08X\n",
RREG32(mmVCE_LMI_CTRL2));
dev_info(adev->dev, " VCE_LMI_CTRL=0x%08X\n",
RREG32(mmVCE_LMI_CTRL));
dev_info(adev->dev, " VCE_LMI_VM_CTRL=0x%08X\n",
RREG32(mmVCE_LMI_VM_CTRL));
dev_info(adev->dev, " VCE_LMI_SWAP_CNTL=0x%08X\n",
RREG32(mmVCE_LMI_SWAP_CNTL));
dev_info(adev->dev, " VCE_LMI_SWAP_CNTL1=0x%08X\n",
RREG32(mmVCE_LMI_SWAP_CNTL1));
dev_info(adev->dev, " VCE_LMI_CACHE_CTRL=0x%08X\n",
RREG32(mmVCE_LMI_CACHE_CTRL));
}
static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@ -630,7 +578,6 @@ const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.is_idle = vce_v2_0_is_idle,
.wait_for_idle = vce_v2_0_wait_for_idle,
.soft_reset = vce_v2_0_soft_reset,
.print_status = vce_v2_0_print_status,
.set_clockgating_state = vce_v2_0_set_clockgating_state,
.set_powergating_state = vce_v2_0_set_powergating_state,
};

View File

@ -315,9 +315,11 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
u32 tmp;
/* Fiji, Stoney are single pipe */
/* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
if ((adev->asic_type == CHIP_FIJI) ||
(adev->asic_type == CHIP_STONEY))
(adev->asic_type == CHIP_STONEY) ||
(adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11))
return AMDGPU_VCE_HARVEST_VCE1;
/* Tonga and CZ are dual or single pipe */
@ -381,14 +383,14 @@ static int vce_v3_0_sw_init(void *handle)
ring = &adev->vce.ring[0];
sprintf(ring->name, "vce0");
r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
&adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
if (r)
return r;
ring = &adev->vce.ring[1];
sprintf(ring->name, "vce1");
r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
&adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
if (r)
return r;
@ -564,73 +566,6 @@ static int vce_v3_0_soft_reset(void *handle)
return vce_v3_0_start(adev);
}
static void vce_v3_0_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "VCE 3.0 registers\n");
dev_info(adev->dev, " VCE_STATUS=0x%08X\n",
RREG32(mmVCE_STATUS));
dev_info(adev->dev, " VCE_VCPU_CNTL=0x%08X\n",
RREG32(mmVCE_VCPU_CNTL));
dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
RREG32(mmVCE_VCPU_CACHE_OFFSET0));
dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE0=0x%08X\n",
RREG32(mmVCE_VCPU_CACHE_SIZE0));
dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
RREG32(mmVCE_VCPU_CACHE_OFFSET1));
dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE1=0x%08X\n",
RREG32(mmVCE_VCPU_CACHE_SIZE1));
dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
RREG32(mmVCE_VCPU_CACHE_OFFSET2));
dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE2=0x%08X\n",
RREG32(mmVCE_VCPU_CACHE_SIZE2));
dev_info(adev->dev, " VCE_SOFT_RESET=0x%08X\n",
RREG32(mmVCE_SOFT_RESET));
dev_info(adev->dev, " VCE_RB_BASE_LO2=0x%08X\n",
RREG32(mmVCE_RB_BASE_LO2));
dev_info(adev->dev, " VCE_RB_BASE_HI2=0x%08X\n",
RREG32(mmVCE_RB_BASE_HI2));
dev_info(adev->dev, " VCE_RB_SIZE2=0x%08X\n",
RREG32(mmVCE_RB_SIZE2));
dev_info(adev->dev, " VCE_RB_RPTR2=0x%08X\n",
RREG32(mmVCE_RB_RPTR2));
dev_info(adev->dev, " VCE_RB_WPTR2=0x%08X\n",
RREG32(mmVCE_RB_WPTR2));
dev_info(adev->dev, " VCE_RB_BASE_LO=0x%08X\n",
RREG32(mmVCE_RB_BASE_LO));
dev_info(adev->dev, " VCE_RB_BASE_HI=0x%08X\n",
RREG32(mmVCE_RB_BASE_HI));
dev_info(adev->dev, " VCE_RB_SIZE=0x%08X\n",
RREG32(mmVCE_RB_SIZE));
dev_info(adev->dev, " VCE_RB_RPTR=0x%08X\n",
RREG32(mmVCE_RB_RPTR));
dev_info(adev->dev, " VCE_RB_WPTR=0x%08X\n",
RREG32(mmVCE_RB_WPTR));
dev_info(adev->dev, " VCE_CLOCK_GATING_A=0x%08X\n",
RREG32(mmVCE_CLOCK_GATING_A));
dev_info(adev->dev, " VCE_CLOCK_GATING_B=0x%08X\n",
RREG32(mmVCE_CLOCK_GATING_B));
dev_info(adev->dev, " VCE_UENC_CLOCK_GATING=0x%08X\n",
RREG32(mmVCE_UENC_CLOCK_GATING));
dev_info(adev->dev, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
RREG32(mmVCE_UENC_REG_CLOCK_GATING));
dev_info(adev->dev, " VCE_SYS_INT_EN=0x%08X\n",
RREG32(mmVCE_SYS_INT_EN));
dev_info(adev->dev, " VCE_LMI_CTRL2=0x%08X\n",
RREG32(mmVCE_LMI_CTRL2));
dev_info(adev->dev, " VCE_LMI_CTRL=0x%08X\n",
RREG32(mmVCE_LMI_CTRL));
dev_info(adev->dev, " VCE_LMI_VM_CTRL=0x%08X\n",
RREG32(mmVCE_LMI_VM_CTRL));
dev_info(adev->dev, " VCE_LMI_SWAP_CNTL=0x%08X\n",
RREG32(mmVCE_LMI_SWAP_CNTL));
dev_info(adev->dev, " VCE_LMI_SWAP_CNTL1=0x%08X\n",
RREG32(mmVCE_LMI_SWAP_CNTL1));
dev_info(adev->dev, " VCE_LMI_CACHE_CTRL=0x%08X\n",
RREG32(mmVCE_LMI_CACHE_CTRL));
}
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@ -750,7 +685,6 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.is_idle = vce_v3_0_is_idle,
.wait_for_idle = vce_v3_0_wait_for_idle,
.soft_reset = vce_v3_0_soft_reset,
.print_status = vce_v3_0_print_status,
.set_clockgating_state = vce_v3_0_set_clockgating_state,
.set_powergating_state = vce_v3_0_set_powergating_state,
};

View File

@ -78,6 +78,11 @@
#include "amdgpu_acp.h"
#endif
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
/*
* Indirect registers accessor
*/
@ -276,6 +281,8 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
stoney_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
default:
break;
}
@ -414,11 +421,11 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
{mmGB_MACROTILE_MODE7, true},
};
static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
{mmGB_TILE_MODE7, true},
{mmGB_TILE_MODE12, true},
{mmGB_TILE_MODE17, true},
@ -426,7 +433,7 @@ static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
{mmGB_MACROTILE_MODE7, true},
};
static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
{mmGRBM_STATUS, false},
{mmGRBM_STATUS2, false},
{mmGRBM_STATUS_SE0, false},
@ -525,8 +532,8 @@ static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value)
{
struct amdgpu_allowed_register_entry *asic_register_table = NULL;
struct amdgpu_allowed_register_entry *asic_register_entry;
const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
const struct amdgpu_allowed_register_entry *asic_register_entry;
uint32_t size, i;
*value = 0;
@ -537,6 +544,8 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
break;
case CHIP_FIJI:
case CHIP_TONGA:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_CARRIZO:
case CHIP_STONEY:
asic_register_table = cz_allowed_read_registers;
@ -907,6 +916,74 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
},
};
static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
{
/* ORDER MATTERS! */
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 2,
.minor = 0,
.rev = 0,
.funcs = &vi_common_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 8,
.minor = 1,
.rev = 0,
.funcs = &gmc_v8_0_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 3,
.minor = 1,
.rev = 0,
.funcs = &tonga_ih_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 7,
.minor = 2,
.rev = 0,
.funcs = &amdgpu_pp_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 11,
.minor = 2,
.rev = 0,
.funcs = &dce_v11_0_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_GFX,
.major = 8,
.minor = 0,
.rev = 0,
.funcs = &gfx_v8_0_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 3,
.minor = 1,
.rev = 0,
.funcs = &sdma_v3_0_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 6,
.minor = 3,
.rev = 0,
.funcs = &uvd_v6_0_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_VCE,
.major = 3,
.minor = 4,
.rev = 0,
.funcs = &vce_v3_0_ip_funcs,
},
};
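/* Polaris10 reuses this IP block list; see the CHIP_POLARIS10 case in vi_set_ip_blocks() */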
static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
/* ORDER MATTERS! */
@ -999,6 +1076,11 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_blocks = tonga_ip_blocks;
adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
adev->ip_blocks = polaris11_ip_blocks;
adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
adev->ip_blocks = cz_ip_blocks;
@ -1076,19 +1158,68 @@ static int vi_common_early_init(void *handle)
adev->external_rev_id = 0x1;
break;
case CHIP_FIJI:
adev->cg_flags = 0;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_CGTS |
AMD_CG_SUPPORT_GFX_CGTS_LS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_ROM_MGCG |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
case CHIP_TONGA:
adev->cg_flags = 0;
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_POLARIS11:
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x5A;
break;
case CHIP_POLARIS10:
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x50;
break;
case CHIP_CARRIZO:
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_CGTS |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGTS_LS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x1;
break;
case CHIP_STONEY:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x1;
break;
default:
@ -1164,24 +1295,19 @@ static int vi_common_wait_for_idle(void *handle)
return 0;
}
static void vi_common_print_status(void *handle)
{
return;
}
static int vi_common_soft_reset(void *handle)
{
return 0;
}
static void fiji_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
temp = data = RREG32_PCIE(ixPCIE_CNTL2);
if (enable)
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
PCIE_CNTL2__MST_MEM_LS_EN_MASK |
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
@ -1194,14 +1320,14 @@ static void fiji_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
WREG32_PCIE(ixPCIE_CNTL2, data);
}
static void fiji_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
if (enable)
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
else
data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
@ -1210,14 +1336,14 @@ static void fiji_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev
WREG32(mmHDP_HOST_PATH_CNTL, data);
}
static void fiji_update_hdp_light_sleep(struct amdgpu_device *adev,
static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
temp = data = RREG32(mmHDP_MEM_POWER_LS);
if (enable)
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
else
data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
@ -1226,14 +1352,14 @@ static void fiji_update_hdp_light_sleep(struct amdgpu_device *adev,
WREG32(mmHDP_MEM_POWER_LS, data);
}
static void fiji_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
if (enable)
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
else
@ -1251,13 +1377,22 @@ static int vi_common_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_FIJI:
fiji_update_bif_medium_grain_light_sleep(adev,
vi_update_bif_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
fiji_update_hdp_medium_grain_clock_gating(adev,
vi_update_hdp_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
fiji_update_hdp_light_sleep(adev,
vi_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
fiji_update_rom_medium_grain_clock_gating(adev,
vi_update_rom_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
vi_update_bif_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
vi_update_hdp_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
vi_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
default:
@ -1284,7 +1419,6 @@ const struct amd_ip_funcs vi_common_ip_funcs = {
.is_idle = vi_common_is_idle,
.wait_for_idle = vi_common_wait_for_idle,
.soft_reset = vi_common_soft_reset,
.print_status = vi_common_print_status,
.set_clockgating_state = vi_common_set_clockgating_state,
.set_powergating_state = vi_common_set_powergating_state,
};

View File

@ -48,6 +48,8 @@ enum amd_asic_type {
CHIP_FIJI,
CHIP_CARRIZO,
CHIP_STONEY,
CHIP_POLARIS10,
CHIP_POLARIS11,
CHIP_LAST,
};
@ -104,6 +106,7 @@ enum amd_powergating_state {
#define AMD_CG_SUPPORT_VCE_MGCG (1 << 14)
#define AMD_CG_SUPPORT_HDP_LS (1 << 15)
#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16)
#define AMD_CG_SUPPORT_ROM_MGCG (1 << 17)
/* PG flags */
#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
@ -162,8 +165,6 @@ struct amd_ip_funcs {
int (*wait_for_idle)(void *handle);
/* soft reset the IP block */
int (*soft_reset)(void *handle);
/* dump the IP block status registers */
void (*print_status)(void *handle);
/* enable/disable cg for the IP block */
int (*set_clockgating_state)(void *handle,
enum amd_clockgating_state state);

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1391,6 +1391,8 @@
#define mmRLC_CGTT_MGCG_OVERRIDE 0xec48
#define mmRLC_CGCG_CGLS_CTRL 0xec49
#define mmRLC_CGCG_RAMP_CTRL 0xec4a
#define mmRLC_CGCG_CGLS_CTRL_3D 0xec9d
#define mmRLC_CGCG_RAMP_CTRL_3D 0xec9e
#define mmRLC_DYN_PG_STATUS 0xec4b
#define mmRLC_DYN_PG_REQUEST 0xec4c
#define mmRLC_PG_DELAY 0xec4d

View File

@ -111,5 +111,6 @@
#define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5
#define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4
#define mmUVD_JPEG_ADDR_CONFIG 0x3a1f
#define mmUVD_GP_SCRATCH4 0x3d38
#endif /* UVD_6_0_D_H */

View File

@ -79,9 +79,23 @@
#define ATOM_PPLL0 2
#define ATOM_PPLL3 3
#define ATOM_PHY_PLL0 4
#define ATOM_PHY_PLL1 5
#define ATOM_EXT_PLL1 8
#define ATOM_GCK_DFS 8
#define ATOM_EXT_PLL2 9
#define ATOM_FCH_CLK 9
#define ATOM_EXT_CLOCK 10
#define ATOM_DP_DTO 11
#define ATOM_COMBOPHY_PLL0 20
#define ATOM_COMBOPHY_PLL1 21
#define ATOM_COMBOPHY_PLL2 22
#define ATOM_COMBOPHY_PLL3 23
#define ATOM_COMBOPHY_PLL4 24
#define ATOM_COMBOPHY_PLL5 25
#define ATOM_PPLL_INVALID 0xFF
#define ENCODER_REFCLK_SRC_P1PLL 0
@ -224,6 +238,31 @@ typedef struct _ATOM_ROM_HEADER
UCHAR ucReserved;
}ATOM_ROM_HEADER;
typedef struct _ATOM_ROM_HEADER_V2_1
{
ATOM_COMMON_TABLE_HEADER sHeader;
UCHAR uaFirmWareSignature[4]; //Signature to distinguish between Atombios and non-atombios,
//atombios should init it as "ATOM", don't change the position
USHORT usBiosRuntimeSegmentAddress;
USHORT usProtectedModeInfoOffset;
USHORT usConfigFilenameOffset;
USHORT usCRC_BlockOffset;
USHORT usBIOS_BootupMessageOffset;
USHORT usInt10Offset;
USHORT usPciBusDevInitCode;
USHORT usIoBaseAddress;
USHORT usSubsystemVendorID;
USHORT usSubsystemID;
USHORT usPCI_InfoOffset;
USHORT usMasterCommandTableOffset;//Offset for SW to get all command table offsets, Don't change the position
USHORT usMasterDataTableOffset; //Offset for SW to get all data table offsets, Don't change the position
UCHAR ucExtendedFunctionCode;
UCHAR ucReserved;
ULONG ulPSPDirTableOffset;
}ATOM_ROM_HEADER_V2_1;
//==============================Command Table Portion====================================
@ -272,12 +311,12 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
USHORT GetSCLKOverMCLKRatio; //Atomic Table, only used by Bios
USHORT SetCRTC_Timing; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT SetCRTC_OverScan; //Atomic Table, used by various SW components,latest version 1.1
USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
USHORT GetSMUClockInfo; //Atomic Table, used only by Bios
USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
USHORT UpdateCRTC_DoubleBufferRegisters; //Atomic Table, used only by Bios
USHORT LUT_AutoFill; //Atomic Table, only used by Bios
USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
USHORT SetDCEClock; //Atomic Table, start from DCE11.1, shared by driver and VBIOS, change DISPCLK and DPREFCLK
USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT GetEngineClock; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT SetCRTC_UsingDTDTiming; //Atomic Table, directly used by various SW components,latest version 1.1
@ -292,7 +331,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
USHORT PowerConnectorDetection; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT MC_Synchronization; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT ComputeMemoryEnginePLL; //Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock
USHORT MemoryRefreshConversion; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock
USHORT Gfx_Init; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock
USHORT VRAM_GetCurrentInfoBlock; //Atomic Table, used only by Bios
USHORT DynamicMemorySettings; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT MemoryTraining; //Atomic Table, used only by Bios
@ -333,6 +372,10 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
#define LCD1OutputControl HW_Misc_Operation
#define TV1OutputControl Gfx_Harvesting
#define TVEncoderControl SMC_Init
#define EnableHW_IconCursor SetDCEClock
#define SetCRTC_Replication GetSMUClockInfo
#define MemoryRefreshConversion Gfx_Init
typedef struct _ATOM_MASTER_COMMAND_TABLE
{
@ -425,6 +468,9 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
#define b3FIRST_TIME_CHANGE_CLOCK 0x08 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
#define b3SKIP_SW_PROGRAM_PLL 0x10 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
#define b3DRAM_SELF_REFRESH_EXIT 0x20 //Applicable to DRAM self refresh exit only. when set, it means it will go to program DRAM self refresh exit path
#define b3SRIOV_INIT_BOOT 0x40 //Use by HV GPU driver only, to load uCode. for ASIC_InitTable SCLK parameter only
#define b3SRIOV_LOAD_UCODE 0x40 //Use by HV GPU driver only, to load uCode. for ASIC_InitTable SCLK parameter only
#define b3SRIOV_SKIP_ASIC_INIT 0x02 //Use by HV GPU driver only, skip ASIC_Init for primary adapter boot. for ASIC_InitTable SCLK parameter only
typedef struct _ATOM_COMPUTE_CLOCK_FREQ
{
@ -518,6 +564,33 @@ typedef struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6
//ucPllCntlFlag
#define SPLL_CNTL_FLAG_VCO_MODE_MASK 0x03
typedef struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_7
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
ULONG ulReserved[5];
}COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_7;
//ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag
#define COMPUTE_GPUCLK_INPUT_FLAG_CLK_TYPE_MASK 0x0f
#define COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK 0x00
#define COMPUTE_GPUCLK_INPUT_FLAG_SCLK 0x01
typedef struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7
{
COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock; //Output Parameter: ucPostDiv=DFS divider
USHORT usSclk_fcw_frac; //fractional divider of fcw = usSclk_fcw_frac/65536
USHORT usSclk_fcw_int; //integer divider of fcw
UCHAR ucSclkPostDiv; //PLL post divider = 2^ucSclkPostDiv
UCHAR ucSclkVcoMode; //0: 4G~8Ghz, 1:3G~6Ghz,3: 2G~4Ghz, 2:Reserved
UCHAR ucSclkPllRange; //GreenTable SCLK PLL range entry index ( 0~7 )
UCHAR ucSscEnable;
USHORT usSsc_fcw1_frac; //fcw1_frac when SSC enable
USHORT usSsc_fcw1_int; //fcw1_int when SSC enable
USHORT usReserved;
USHORT usPcc_fcw_int;
USHORT usSsc_fcw_slew_frac; //fcw_slew_frac when SSC enable
USHORT usPcc_fcw_slew_frac;
}COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7;
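/*
 * Hedged sketch of the relationship implied by the comments above: the
 * fractional feedback word is fcw_int + fcw_frac/65536 and the post
 * divider is 2^ucSclkPostDiv. The reference clock value is not given by
 * this table, so it is a caller-supplied assumption in this illustration.
 */
static uint32_t sclk_from_fcw_10khz(uint32_t refclk_10khz, uint16_t fcw_int,
				    uint16_t fcw_frac, uint8_t postdiv)
{
	uint64_t vco = (uint64_t)refclk_10khz *
		       (((uint64_t)fcw_int << 16) + fcw_frac);

	return (uint32_t)((vco >> 16) >> postdiv);
}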
// ucInputFlag
#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
@ -557,12 +630,16 @@ typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2
ULONG ulReserved;
}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2;
//Input parameter of DynamicMemorySettingsTable
//when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag = COMPUTE_MEMORY_PLL_PARAM
typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
{
ATOM_COMPUTE_CLOCK_FREQ ulClock;
ULONG ulReserved[2];
}DYNAMICE_MEMORY_SETTINGS_PARAMETER;
//Input parameter of DynamicMemorySettingsTable
//when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag == COMPUTE_ENGINE_PLL_PARAM
typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
{
ATOM_COMPUTE_CLOCK_FREQ ulClock;
@ -570,6 +647,29 @@ typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
ULONG ulReserved;
}DYNAMICE_ENGINE_SETTINGS_PARAMETER;
//Input parameter of DynamicMemorySettingsTable ver2.1 and above
//when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag == ADJUST_MC_SETTING_PARAM
typedef struct _DYNAMICE_MC_DPM_SETTINGS_PARAMETER
{
ATOM_COMPUTE_CLOCK_FREQ ulClock;
UCHAR ucMclkDPMState;
UCHAR ucReserved[3];
ULONG ulReserved;
}DYNAMICE_MC_DPM_SETTINGS_PARAMETER;
//ucMclkDPMState
#define DYNAMIC_MC_DPM_SETTING_LOW_DPM_STATE 0
#define DYNAMIC_MC_DPM_SETTING_MEDIUM_DPM_STATE 1
#define DYNAMIC_MC_DPM_SETTING_HIGH_DPM_STATE 2
typedef union _DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1
{
DYNAMICE_MEMORY_SETTINGS_PARAMETER asMCReg;
DYNAMICE_ENGINE_SETTINGS_PARAMETER asMCArbReg;
DYNAMICE_MC_DPM_SETTINGS_PARAMETER asDPMMCReg;
}DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1;
/****************************************************************************/
// Structures used by SetEngineClockTable
/****************************************************************************/
@ -584,6 +684,13 @@ typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
}SET_ENGINE_CLOCK_PS_ALLOCATION;
typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION_V1_2
{
ULONG ulTargetEngineClock; //In 10Khz unit
COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_7 sReserved;
}SET_ENGINE_CLOCK_PS_ALLOCATION_V1_2;
/****************************************************************************/
// Structures used by SetMemoryClockTable
/****************************************************************************/
@ -827,6 +934,12 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
#define ATOM_ENCODER_CMD_SETUP 0x0f
#define ATOM_ENCODER_CMD_SETUP_PANEL_MODE 0x10
// New Command for DIGxEncoderControlTable v1.5
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN4 0x14
#define ATOM_ENCODER_CMD_STREAM_SETUP 0x0F //renamed from ATOM_ENCODER_CMD_SETUP
#define ATOM_ENCODER_CMD_LINK_SETUP 0x11 //internal use, called by other Command Table
#define ATOM_ENCODER_CMD_ENCODER_BLANK 0x12 //internal use, called by other Command Table
// ucStatus
#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00
@ -955,6 +1068,69 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
#define DP_PANEL_MODE_INTERNAL_DP2_MODE 0x01
#define DP_PANEL_MODE_INTERNAL_DP1_MODE 0x11
typedef struct _ENCODER_STREAM_SETUP_PARAMETERS_V5
{
UCHAR ucDigId; // 0~6 map to DIG0~DIG6
UCHAR ucAction; // = ATOM_ENCODER_CMD_STREAM_SETUP
UCHAR ucDigMode; // ATOM_ENCODER_MODE_DP/ATOM_ENCODER_MODE_DVI/ATOM_ENCODER_MODE_HDMI
UCHAR ucLaneNum; // Lane number
ULONG ulPixelClock; // Pixel Clock in 10Khz
UCHAR ucBitPerColor;
UCHAR ucLinkRateIn270Mhz;//= DP link rate/270Mhz: =6: 1.62G, =10: 2.7G, =20: 5.4G, =30: 8.1G, etc.
UCHAR ucReserved[2];
}ENCODER_STREAM_SETUP_PARAMETERS_V5;
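/*
 * Per the ucLinkRateIn270Mhz comment above, the per-lane link rate is the
 * stored value times 270 MHz (6 -> 1.62 GHz, 10 -> 2.7 GHz, 20 -> 5.4 GHz,
 * 30 -> 8.1 GHz). Decode sketch:
 */
static unsigned int dp_link_rate_mhz(unsigned char link_rate_in_270mhz)
{
	return (unsigned int)link_rate_in_270mhz * 270;
}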
typedef struct _ENCODER_LINK_SETUP_PARAMETERS_V5
{
UCHAR ucDigId; // 0~6 map to DIG0~DIG6
UCHAR ucAction; // = ATOM_ENOCODER_CMD_LINK_SETUP
UCHAR ucDigMode; // ATOM_ENCODER_MODE_DP/ATOM_ENCODER_MODE_DVI/ATOM_ENCODER_MODE_HDMI
UCHAR ucLaneNum; // Lane number
ULONG ulSymClock; // Symbol Clock in 10Khz
UCHAR ucHPDSel;
UCHAR ucDigEncoderSel; // DIG stream( front-end ) selection, bit0 means DIG0 FE is enabled,
UCHAR ucReserved[2];
}ENCODER_LINK_SETUP_PARAMETERS_V5;
typedef struct _DP_PANEL_MODE_SETUP_PARAMETERS_V5
{
UCHAR ucDigId; // 0~6 map to DIG0~DIG6
UCHAR ucAction; // = ATOM_ENCODER_CMD_DPLINK_SETUP
UCHAR ucPanelMode; // =0: external DP
// =0x1: internal DP2
// =0x11: internal DP1 NutMeg/Travis DP Translator
UCHAR ucReserved;
ULONG ulReserved[2];
}DP_PANEL_MODE_SETUP_PARAMETERS_V5;
typedef struct _ENCODER_GENERIC_CMD_PARAMETERS_V5
{
UCHAR ucDigId; // 0~6 map to DIG0~DIG6
UCHAR ucAction; // = rest of generic encoder command which does not carry any parameters
UCHAR ucReserved[2];
ULONG ulReserved[2];
}ENCODER_GENERIC_CMD_PARAMETERS_V5;
//ucDigId
#define ATOM_ENCODER_CONFIG_V5_DIG0_ENCODER 0x00
#define ATOM_ENCODER_CONFIG_V5_DIG1_ENCODER 0x01
#define ATOM_ENCODER_CONFIG_V5_DIG2_ENCODER 0x02
#define ATOM_ENCODER_CONFIG_V5_DIG3_ENCODER 0x03
#define ATOM_ENCODER_CONFIG_V5_DIG4_ENCODER 0x04
#define ATOM_ENCODER_CONFIG_V5_DIG5_ENCODER 0x05
#define ATOM_ENCODER_CONFIG_V5_DIG6_ENCODER 0x06
typedef union _DIG_ENCODER_CONTROL_PARAMETERS_V5
{
ENCODER_GENERIC_CMD_PARAMETERS_V5 asCmdParam;
ENCODER_STREAM_SETUP_PARAMETERS_V5 asStreamParam;
ENCODER_LINK_SETUP_PARAMETERS_V5 asLinkParam;
DP_PANEL_MODE_SETUP_PARAMETERS_V5 asDPPanelModeParam;
}DIG_ENCODER_CONTROL_PARAMETERS_V5;
/****************************************************************************/
// Structures used by UNIPHYTransmitterControlTable
// LVTMATransmitterControlTable
@ -1371,6 +1547,49 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5 DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6
{
UCHAR ucPhyId; // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4= UNIPHYE 5=UNIPHYF
UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_xxx
union
{
UCHAR ucDigMode; // ATOM_ENCODER_MODE_DP/ATOM_ENCODER_MODE_DVI/ATOM_ENCODER_MODE_HDMI
UCHAR ucDPLaneSet; // DP voltage swing and pre-emphasis value defined in DPCD DP_LANE_SET, "DP_LANE_SET__xDB_y_zV"
};
UCHAR ucLaneNum; // Lane number
ULONG ulSymClock; // Symbol Clock in 10Khz
UCHAR ucHPDSel; // =1: HPD1, =2: HPD2, .... =6: HPD6, =0: HPD is not assigned
UCHAR ucDigEncoderSel; // DIG stream( front-end ) selection, bit0 means DIG0 FE is enabled,
UCHAR ucConnObjId; // Connector Object Id defined in ObjectId.h
UCHAR ucReserved;
ULONG ulReserved;
}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6;
// ucDigEncoderSel
#define ATOM_TRANMSITTER_V6__DIGA_SEL 0x01
#define ATOM_TRANMSITTER_V6__DIGB_SEL 0x02
#define ATOM_TRANMSITTER_V6__DIGC_SEL 0x04
#define ATOM_TRANMSITTER_V6__DIGD_SEL 0x08
#define ATOM_TRANMSITTER_V6__DIGE_SEL 0x10
#define ATOM_TRANMSITTER_V6__DIGF_SEL 0x20
#define ATOM_TRANMSITTER_V6__DIGG_SEL 0x40
// ucDigMode
#define ATOM_TRANSMITTER_DIGMODE_V6_DP 0
#define ATOM_TRANSMITTER_DIGMODE_V6_DVI 2
#define ATOM_TRANSMITTER_DIGMODE_V6_HDMI 3
#define ATOM_TRANSMITTER_DIGMODE_V6_DP_MST 5
//ucHPDSel
#define ATOM_TRANSMITTER_V6_NO_HPD_SEL 0x00
#define ATOM_TRANSMITTER_V6_HPD1_SEL 0x01
#define ATOM_TRANSMITTER_V6_HPD2_SEL 0x02
#define ATOM_TRANSMITTER_V6_HPD3_SEL 0x03
#define ATOM_TRANSMITTER_V6_HPD4_SEL 0x04
#define ATOM_TRANSMITTER_V6_HPD5_SEL 0x05
#define ATOM_TRANSMITTER_V6_HPD6_SEL 0x06
/****************************************************************************/
// Structures used by ExternalEncoderControlTable V1.3
@ -1784,6 +2003,101 @@ typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
typedef struct _PIXEL_CLOCK_PARAMETERS_V7
{
ULONG ulPixelClock; // target the pixel clock to drive the CRTC timing in unit of 100Hz.
UCHAR ucPpll; // ATOM_PHY_PLL0/ATOM_PHY_PLL1/ATOM_PPLL0
UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
// indicate which graphic encoder will be used.
UCHAR ucEncoderMode; // Encoder mode:
UCHAR ucMiscInfo; // bit[0]= Force program PLL for pixclk
// bit[1]= Force program PHY PLL only ( internally used by VBIOS only in DP case which PHYPLL is programmed for SYMCLK, not Pixclk )
// bit[5:4]= RefClock source for PPLL.
// =0: XTLAIN( default mode )
// =1: pcie
// =2: GENLK
UCHAR ucCRTC; // ATOM_CRTC1~6, indicate the CRTC controller to
UCHAR ucDeepColorRatio; // HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:36bpp
UCHAR ucReserved[2];
ULONG ulReserved;
}PIXEL_CLOCK_PARAMETERS_V7;
//ucMiscInfo
#define PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL 0x01
#define PIXEL_CLOCK_V7_MISC_PROG_PHYPLL 0x02
#define PIXEL_CLOCK_V7_MISC_YUV420_MODE 0x04
#define PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN 0x08
#define PIXEL_CLOCK_V7_MISC_REF_DIV_SRC 0x30
#define PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN 0x00
#define PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_PCIE 0x10
#define PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK 0x20
//ucDeepColorRatio
#define PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS 0x00 //00 - DCCG_DEEP_COLOR_DTO_DISABLE: Disable Deep Color DTO
#define PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4 0x01 //01 - DCCG_DEEP_COLOR_DTO_5_4_RATIO: Set Deep Color DTO to 5:4
#define PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2 0x02 //02 - DCCG_DEEP_COLOR_DTO_3_2_RATIO: Set Deep Color DTO to 3:2
#define PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1 0x03 //03 - DCCG_DEEP_COLOR_DTO_2_1_RATIO: Set Deep Color DTO to 2:1
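/*
 * Worked example of the two encodings above: ulPixelClock is in 100Hz
 * units, so a 297 MHz HDMI mode is stored as 2970000. For deep color the
 * effective TMDS clock scales by the DTO ratio; a minimal decode sketch
 * (the 48bpp mapping for the 2:1 ratio is an assumption, since
 * ucDeepColorRatio only documents 24/30/36bpp):
 */
static uint32_t tmds_clock_100hz(uint32_t pixclk_100hz, uint8_t deep_color_ratio)
{
	switch (deep_color_ratio) {
	case PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4:
		return pixclk_100hz * 5 / 4;	/* 30bpp */
	case PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2:
		return pixclk_100hz * 3 / 2;	/* 36bpp */
	case PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1:
		return pixclk_100hz * 2;	/* assumed 48bpp */
	default:
		return pixclk_100hz;		/* RATIO_DIS: 24bpp, 1:1 */
	}
}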
// SetDCEClockTable input parameter for DCE11.1
typedef struct _SET_DCE_CLOCK_PARAMETERS_V1_1
{
ULONG ulDISPClkFreq; // target DISPCLK frequency in unit of 10kHz, return real DISPCLK frequency. when ucFlag[1]=1, in unit of 100Hz.
UCHAR ucFlag; // bit0=1: DPREFCLK bypass DFS bit0=0: DPREFCLK not bypass DFS
UCHAR ucCrtc; // use when enable DCCG pixel clock ucFlag[1]=1
UCHAR ucPpllId; // use when enable DCCG pixel clock ucFlag[1]=1
UCHAR ucDeepColorRatio; // use when enable DCCG pixel clock ucFlag[1]=1
}SET_DCE_CLOCK_PARAMETERS_V1_1;
typedef struct _SET_DCE_CLOCK_PS_ALLOCATION_V1_1
{
SET_DCE_CLOCK_PARAMETERS_V1_1 asParam;
ULONG ulReserved[2];
}SET_DCE_CLOCK_PS_ALLOCATION_V1_1;
//SET_DCE_CLOCK_PARAMETERS_V1_1.ucFlag
#define SET_DCE_CLOCK_FLAG_GEN_DPREFCLK 0x01
#define SET_DCE_CLOCK_FLAG_DPREFCLK_BYPASS 0x01
#define SET_DCE_CLOCK_FLAG_ENABLE_PIXCLK 0x02
// SetDCEClockTable input parameter for DCE11.2( POLARIS10 and POLARIS11 ) and above
typedef struct _SET_DCE_CLOCK_PARAMETERS_V2_1
{
ULONG ulDCEClkFreq; // target DCE frequency in unit of 10KHZ, return real DISPCLK/DPREFCLK frequency.
UCHAR ucDCEClkType; // =0: DISPCLK =1: DPREFCLK =2: PIXCLK
UCHAR ucDCEClkSrc; // ATOM_PLL0 or ATOM_GCK_DFS or ATOM_FCH_CLK or ATOM_COMBOPHY_PLLx
UCHAR ucDCEClkFlag; // Bit [1:0] = PPLL ref clock source ( when ucDCEClkSrc= ATOM_PPLL0 )
UCHAR ucCRTC; // ucDisp Pipe Id, ATOM_CRTC0/1/2/..., use only when ucDCEClkType = PIXCLK
}SET_DCE_CLOCK_PARAMETERS_V2_1;
//ucDCEClkType
#define DCE_CLOCK_TYPE_DISPCLK 0
#define DCE_CLOCK_TYPE_DPREFCLK 1
#define DCE_CLOCK_TYPE_PIXELCLK 2 // used by VBIOS internally, called by SetPixelClockTable
//ucDCEClkFlag when ucDCEClkType == DPREFCLK
#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_MASK 0x03
#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA 0x00
#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK 0x01
#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE 0x02
#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN 0x03
//ucDCEClkFlag when ucDCEClkType == PIXCLK
#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_MASK 0x03
#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_DIS 0x00 //00 - DCCG_DEEP_COLOR_DTO_DISABLE: Disable Deep Color DTO
#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_5_4 0x01 //01 - DCCG_DEEP_COLOR_DTO_5_4_RATIO: Set Deep Color DTO to 5:4
#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_3_2 0x02 //02 - DCCG_DEEP_COLOR_DTO_3_2_RATIO: Set Deep Color DTO to 3:2
#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_2_1 0x03 //03 - DCCG_DEEP_COLOR_DTO_2_1_RATIO: Set Deep Color DTO to 2:1
#define DCE_CLOCK_FLAG_PIXCLK_YUV420_MODE 0x04
typedef struct _SET_DCE_CLOCK_PS_ALLOCATION_V2_1
{
SET_DCE_CLOCK_PARAMETERS_V2_1 asParam;
ULONG ulReserved[2];
}SET_DCE_CLOCK_PS_ALLOCATION_V2_1;
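/*
 * Hedged usage sketch for the v2.1 parameters: request a 600 MHz DISPCLK
 * sourced from the GCK DFS. The clock source chosen here is illustrative,
 * not mandated by the table definition.
 */
static void fill_dce_clock_args_example(SET_DCE_CLOCK_PS_ALLOCATION_V2_1 *args)
{
	memset(args, 0, sizeof(*args));
	args->asParam.ulDCEClkFreq = 60000;	/* 600 MHz in 10 kHz units */
	args->asParam.ucDCEClkType = DCE_CLOCK_TYPE_DISPCLK;
	args->asParam.ucDCEClkSrc  = ATOM_GCK_DFS;
}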
/****************************************************************************/
// Structures used by AdjustDisplayPllTable
@ -2300,6 +2614,11 @@ typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
#define VOLTAGE_TYPE_VDDCI 4
#define VOLTAGE_TYPE_VDDGFX 5
#define VOLTAGE_TYPE_PCC 6
#define VOLTAGE_TYPE_MVPP 7
#define VOLTAGE_TYPE_LEDDPM 8
#define VOLTAGE_TYPE_PCC_MVDD 9
#define VOLTAGE_TYPE_PCIE_VDDC 10
#define VOLTAGE_TYPE_PCIE_VDDR 11
#define VOLTAGE_TYPE_GENERIC_I2C_1 0x11
#define VOLTAGE_TYPE_GENERIC_I2C_2 0x12
@ -2396,6 +2715,39 @@ typedef struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2
USHORT usTDP_Power; // TDP_Current in unit of 0.1W
}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
// New Added from CI Hawaii for GetVoltageInfoTable, input parameter structure
typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3
{
UCHAR ucVoltageType; // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
UCHAR ucVoltageMode; // Input: Indicate action: Get voltage info
USHORT usVoltageLevel; // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
ULONG ulSCLKFreq; // Input: when ucVoltageMode= ATOM_GET_VOLTAGE_EVV_VOLTAGE, DPM state SCLK frequency, Define in PPTable SCLK/Voltage dependence table
ULONG ulReserved[3];
}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3;
// New Added from CI Hawaii for EVV feature
typedef struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3
{
ULONG ulVoltageLevel; // real voltage level in unit of 0.01mv
ULONG ulReserved[4];
}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3;
/****************************************************************************/
// Structures used by GetSMUClockInfo
/****************************************************************************/
typedef struct _GET_SMU_CLOCK_INFO_INPUT_PARAMETER_V2_1
{
ULONG ulDfsPllOutputFreq:24;
ULONG ucDfsDivider:8;
}GET_SMU_CLOCK_INFO_INPUT_PARAMETER_V2_1;
typedef struct _GET_SMU_CLOCK_INFO_OUTPUT_PARAMETER_V2_1
{
ULONG ulDfsOutputFreq;
}GET_SMU_CLOCK_INFO_OUTPUT_PARAMETER_V2_1;
/****************************************************************************/
// Structures used by TVEncoderControlTable
/****************************************************************************/
@ -2429,13 +2781,13 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
USHORT PaletteData; // Only used by BIOS
USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info
USHORT DIGTransmitterInfo; // Internal used by VBIOS only version 3.1
USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
USHORT SMU_Info; // Shared by various SW components,latest version 1.1
USHORT SupportedDevicesInfo; // Will be obsolete from R600
USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600
USHORT VRAM_UsageByFirmware; // Shared by various SW components,latest version 1.3 will be used from R600
USHORT GPIO_Pin_LUT; // Shared by various SW components,latest version 1.1
USHORT VESA_ToInternalModeLUT; // Only used by Bios
USHORT ComponentVideoInfo; // Shared by various SW components,latest version 2.1 will be used from R600
USHORT GFX_Info; // Shared by various SW components,latest version 2.1 will be used from R600
USHORT PowerPlayInfo; // Shared by various SW components,latest version 2.1,new design from R600
USHORT GPUVirtualizationInfo; // Will be obsolete from R600
USHORT SaveRestoreInfo; // Only used by Bios
@ -2469,6 +2821,8 @@ typedef struct _ATOM_MASTER_DATA_TABLE
#define DAC_Info PaletteData
#define TMDS_Info DIGTransmitterInfo
#define CompassionateData GPUVirtualizationInfo
#define AnalogTV_Info SMU_Info
#define ComponentVideoInfo GFX_Info
/****************************************************************************/
// Structure used in MultimediaCapabilityInfoTable
@ -4278,10 +4632,15 @@ typedef struct _EXT_DISPLAY_PATH
#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
//usCaps
#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01
#define EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN 0x02
#define EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 0x04
#define EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT 0x08
#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x0001
#define EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN 0x0002
#define EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK 0x007C
#define EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 (0x01 << 2 ) //PI redriver chip
#define EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT (0x02 << 2 ) //TI retimer chip
#define EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 (0x03 << 2 ) //Parade DP->HDMI re-converter chip
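/*
 * Decode sketch for the widened usCaps field above: bits [6:2] carry the
 * external chip identifier, so masking and shifting recovers the chip code
 * (1 = PI redriver, 2 = TI retimer, 3 = Parade re-converter):
 */
static unsigned int ext_display_path_chip_id(unsigned short us_caps)
{
	return (us_caps & EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) >> 2;
}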
typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
{
@ -4325,10 +4684,10 @@ typedef struct _ATOM_COMMON_RECORD_HEADER
#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
#define ATOM_ENCODER_CAP_RECORD_TYPE 20
#define ATOM_BRACKET_LAYOUT_RECORD_TYPE 21
#define ATOM_CONNECTOR_FORCED_TMDS_CAP_RECORD_TYPE 22
//Must be updated when a new record type is added, equal to that record definition!
#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE
#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_FORCED_TMDS_CAP_RECORD_TYPE
typedef struct _ATOM_I2C_RECORD
{
@ -4458,10 +4817,12 @@ typedef struct _ATOM_ENCODER_DVO_CF_RECORD
UCHAR ucPadding[2];
}ATOM_ENCODER_DVO_CF_RECORD;
// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by HW encoder
// Bit maps for ATOM_ENCODER_CAP_RECORD.usEncoderCap
#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by HW encoder; retired in NI, from SI onward this bit means MST_EN
#define ATOM_ENCODER_CAP_RECORD_MST_EN 0x01 // from SI, this bit means DP MST is enable or not.
#define ATOM_ENCODER_CAP_RECORD_HBR2_EN 0x02 // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
#define ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN 0x04 // HDMI2.0 6Gbps enable or not.
#define ATOM_ENCODER_CAP_RECORD_HBR3_EN 0x08 // DP1.3 HBR3 is supported by board.
typedef struct _ATOM_ENCODER_CAP_RECORD
{
@ -4482,6 +4843,31 @@ typedef struct _ATOM_ENCODER_CAP_RECORD
};
}ATOM_ENCODER_CAP_RECORD;
// Used after SI
typedef struct _ATOM_ENCODER_CAP_RECORD_V2
{
ATOM_COMMON_RECORD_HEADER sheader;
union {
USHORT usEncoderCap;
struct {
#if ATOM_BIG_ENDIAN
USHORT usReserved:12; // Bit4-15 may be defined for other capability in future
USHORT usHBR3En:1; // bit3 is for DP1.3 HBR3 enable
USHORT usHDMI6GEn:1; // Bit2 is for HDMI6Gbps enable, this bit is used starting from CZ( APU) Ellesmere (dGPU)
USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
USHORT usMSTEn:1; // Bit0 is for DP1.2 MST enable
#else
USHORT usMSTEn:1; // Bit0 is for DP1.2 MST enable
USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
USHORT usHDMI6GEn:1; // Bit2 is for HDMI6Gbps enable, this bit is used starting from CZ( APU) Ellesmere (dGPU)
USHORT usHBR3En:1; // bit3 is for DP1.3 HBR3 enable
USHORT usReserved:12; // Bit4-15 may be defined for other capability in future
#endif
};
};
}ATOM_ENCODER_CAP_RECORD_V2;
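/*
 * Minimal decode sketch for usEncoderCap using the bit definitions above;
 * equivalently, the named bitfields (usMSTEn, usHBR2En, ...) can be read
 * directly through the union:
 */
static bool encoder_supports_hbr3(const ATOM_ENCODER_CAP_RECORD_V2 *rec)
{
	return (rec->usEncoderCap & ATOM_ENCODER_CAP_RECORD_HBR3_EN) != 0;
}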
// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
@ -4554,6 +4940,16 @@ typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
USHORT usReserved;
}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
typedef struct _ATOM_CONNECTOR_FORCED_TMDS_CAP_RECORD
{
ATOM_COMMON_RECORD_HEADER sheader;
// override TMDS capability on this connector when it operates in TMDS mode. ucMaxTmdsClkRateIn2_5Mhz = max TMDS clock in MHz / 2.5
UCHAR ucMaxTmdsClkRateIn2_5Mhz;
UCHAR ucReserved;
} ATOM_CONNECTOR_FORCED_TMDS_CAP_RECORD;
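/*
 * Per the record's own comment the stored byte is the maximum TMDS clock
 * divided by 2.5 MHz, so decoding is a single multiply (done in kHz here
 * to stay integral):
 */
static uint32_t forced_max_tmds_khz(const ATOM_CONNECTOR_FORCED_TMDS_CAP_RECORD *rec)
{
	return (uint32_t)rec->ucMaxTmdsClkRateIn2_5Mhz * 2500;
}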
typedef struct _ATOM_CONNECTOR_LAYOUT_INFO
{
USHORT usConnectorObjectId;
@ -4784,11 +5180,38 @@ typedef struct _ATOM_SVID2_VOLTAGE_OBJECT_V3
ULONG ulReserved;
}ATOM_SVID2_VOLTAGE_OBJECT_V3;
typedef struct _ATOM_MERGED_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_MERGED_POWER
UCHAR ucMergedVType; // VDDC/VDCCI/....
UCHAR ucReserved[3];
}ATOM_MERGED_VOLTAGE_OBJECT_V3;
typedef struct _ATOM_EVV_DPM_INFO
{
ULONG ulDPMSclk; // DPM state SCLK
USHORT usVAdjOffset; // Adjust Voltage offset in unit of mv
UCHAR ucDPMTblVIndex; // Voltage Index in SMC_DPM_Table structure VddcTable/VddGfxTable
UCHAR ucDPMState; // DPMState0~7
} ATOM_EVV_DPM_INFO;
// ucVoltageMode = VOLTAGE_OBJ_EVV
typedef struct _ATOM_EVV_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_SVID2
ATOM_EVV_DPM_INFO asEvvDpmList[8];
}ATOM_EVV_VOLTAGE_OBJECT_V3;
typedef union _ATOM_VOLTAGE_OBJECT_V3{
ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
ATOM_SVID2_VOLTAGE_OBJECT_V3 asSVID2Obj;
ATOM_EVV_VOLTAGE_OBJECT_V3 asEvvObj;
}ATOM_VOLTAGE_OBJECT_V3;
typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1
@ -4963,7 +5386,11 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V3_3
ULONG ulLkgEncodeMax;
ULONG ulLkgEncodeMin;
ULONG ulEfuseLogisticAlpha;
union{
USHORT usPowerDpm0;
USHORT usParamNegFlag; //bit0=1: indicates ulRoBeta is negative, bit1=1: indicates Kv_m max is positive
};
USHORT usPowerDpm1;
USHORT usPowerDpm2;
USHORT usPowerDpm3;
@ -5067,6 +5494,86 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V3_4
ULONG ulReserved[8]; // Reserved for future ASIC
}ATOM_ASIC_PROFILING_INFO_V3_4;
// for Polaris10/Polaris11 speed EVV algorithm
typedef struct _ATOM_ASIC_PROFILING_INFO_V3_5
{
ATOM_COMMON_TABLE_HEADER asHeader;
ULONG ulMaxVddc; //Maximum voltage for all parts, in unit of 0.01mv
ULONG ulMinVddc; //Minimum voltage for all parts, in unit of 0.01mv
USHORT usLkgEuseIndex; //Efuse Lkg_FT address ( BYTE address )
UCHAR ucLkgEfuseBitLSB; //Efuse Lkg_FT bit shift in 32bit DWORD
UCHAR ucLkgEfuseLength; //Efuse Lkg_FT length
ULONG ulLkgEncodeLn_MaxDivMin; //value of ln(Max_Lkg_Ft/Min_Lkg_Ft ) in unit of 0.00001 ( unit=100000 )
ULONG ulLkgEncodeMax; //Maximum Lkg_Ft measured value ( or efuse decode value ), in unit of 0.00001 ( unit=100000 )
ULONG ulLkgEncodeMin; //Minimum Lkg_Ft measured value ( or efuse decode value ), in unit of 0.00001 ( unit=100000 )
EFUSE_LINEAR_FUNC_PARAM sRoFuse;//Efuse RO info: DWORD address, bit shift, length, max/min measure value. in unit of 1.
ULONG ulEvvDefaultVddc; //def="EVV_DEFAULT_VDDC" descr="return default VDDC(v) when Efuse not cut" unit="100000"/>
ULONG ulEvvNoCalcVddc; //def="EVV_NOCALC_VDDC" descr="return VDDC(v) when Calculation is bad" unit="100000"/>
ULONG ulSpeed_Model; //def="EVV_SPEED_MODEL" descr="0 = Greek model, 1 = multivariate model" unit="1"/>
ULONG ulSM_A0; //def="EVV_SM_A0" descr="Leakage coeff(Multivariant Mode)." unit="100000"/>
ULONG ulSM_A1; //def="EVV_SM_A1" descr="Leakage/SCLK coeff(Multivariant Mode)." unit="1000000"/>
ULONG ulSM_A2; //def="EVV_SM_A2" descr="Alpha( Greek Mode ) or VDDC/SCLK coeff(Multivariant Mode)." unit="100000"/>
ULONG ulSM_A3; //def="EVV_SM_A3" descr="Beta( Greek Mode ) or SCLK coeff(Multivariant Mode)." unit="100000"/>
ULONG ulSM_A4; //def="EVV_SM_A4" descr="VDDC^2/SCLK coeff(Multivariant Mode)." unit="100000"/>
ULONG ulSM_A5; //def="EVV_SM_A5" descr="VDDC^2 coeff(Multivariant Mode)." unit="100000"/>
ULONG ulSM_A6; //def="EVV_SM_A6" descr="Gamma( Greek Mode ) or VDDC coeff(Multivariant Mode)." unit="100000"/>
ULONG ulSM_A7; //def="EVV_SM_A7" descr="Epsilon( Greek Mode ) or constant(Multivariant Mode)." unit="100000"/>
UCHAR ucSM_A0_sign; //def="EVV_SM_A0_SIGN" descr="=0 SM_A0 is positive. =1: SM_A0 is negative" unit="1"/>
UCHAR ucSM_A1_sign; //def="EVV_SM_A1_SIGN" descr="=0 SM_A1 is positive. =1: SM_A1 is negative" unit="1"/>
UCHAR ucSM_A2_sign; //def="EVV_SM_A2_SIGN" descr="=0 SM_A2 is positive. =1: SM_A2 is negative" unit="1"/>
UCHAR ucSM_A3_sign; //def="EVV_SM_A3_SIGN" descr="=0 SM_A3 is positive. =1: SM_A3 is negative" unit="1"/>
UCHAR ucSM_A4_sign; //def="EVV_SM_A4_SIGN" descr="=0 SM_A4 is positive. =1: SM_A4 is negative" unit="1"/>
UCHAR ucSM_A5_sign; //def="EVV_SM_A5_SIGN" descr="=0 SM_A5 is positive. =1: SM_A5 is negative" unit="1"/>
UCHAR ucSM_A6_sign; //def="EVV_SM_A6_SIGN" descr="=0 SM_A6 is positive. =1: SM_A6 is negative" unit="1"/>
UCHAR ucSM_A7_sign; //def="EVV_SM_A7_SIGN" descr="=0 SM_A7 is positive. =1: SM_A7 is negative" unit="1"/>
ULONG ulMargin_RO_a; //def="EVV_MARGIN_RO_A" descr="A Term to represent RO equation in Ax2+Bx+C, unit=1"
ULONG ulMargin_RO_b; //def="EVV_MARGIN_RO_B" descr="B Term to represent RO equation in Ax2+Bx+C, unit=1"
ULONG ulMargin_RO_c; //def="EVV_MARGIN_RO_C" descr="C Term to represent RO equation in Ax2+Bx+C, unit=1"
ULONG ulMargin_fixed; //def="EVV_MARGIN_FIXED" descr="Fixed MHz to add to SCLK margin, unit=1" unit="1"/>
ULONG ulMargin_Fmax_mean; //def="EVV_MARGIN_FMAX_MEAN" descr="Percentage to add for Fmax mean margin unit=10000" unit="10000"/>
ULONG ulMargin_plat_mean; //def="EVV_MARGIN_PLAT_MEAN" descr="Percentage to add for platform mean margin unit=10000" unit="10000"/>
ULONG ulMargin_Fmax_sigma; //def="EVV_MARGIN_FMAX_SIGMA" descr="Percentage to add for Fmax sigma margin unit=10000" unit="10000"/>
ULONG ulMargin_plat_sigma; //def="EVV_MARGIN_PLAT_SIGMA" descr="Percentage to add for platform sigma margin unit=10000" unit="10000"/>
ULONG ulMargin_DC_sigma; //def="EVV_MARGIN_DC_SIGMA" descr="Regulator DC tolerance margin (mV) unit=100" unit="100"/>
ULONG ulReserved[12];
}ATOM_ASIC_PROFILING_INFO_V3_5;
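/*
 * One plausible reading of the coefficient descriptions above, written out
 * as the multivariate model they suggest (signs taken from the matching
 * ucSM_Ax_sign fields). This is a hedged sketch of the comments, not the
 * driver's authoritative EVV derivation:
 *
 *   f(Lkg, SCLK, VDDC) = A0*Lkg + A1*Lkg*SCLK + A2*VDDC*SCLK + A3*SCLK
 *                      + A4*VDDC^2*SCLK + A5*VDDC^2 + A6*VDDC + A7
 *
 * with ulSpeed_Model selecting between this form and the Greek model
 * (Alpha/Beta/Gamma/Epsilon mapped onto A2/A3/A6/A7 per their descr text).
 */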
typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{
ULONG ulMaxSclkFreq;
UCHAR ucVco_setting; // 1: 3-6GHz, 3: 2-4GHz
UCHAR ucPostdiv; // divide by 2^n
USHORT ucFcw_pcc;
USHORT ucFcw_trans_upper;
USHORT ucRcw_trans_lower;
}ATOM_SCLK_FCW_RANGE_ENTRY_V1;
// SMU_InfoTable for Polaris10/Polaris11
typedef struct _ATOM_SMU_INFO_V2_1
{
ATOM_COMMON_TABLE_HEADER asHeader;
UCHAR ucSclkEntryNum; // for potential future extend, indicate the number of ATOM_SCLK_FCW_RANGE_ENTRY_V1
UCHAR ucReserved[3];
ATOM_SCLK_FCW_RANGE_ENTRY_V1 asSclkFcwRangeEntry[8];
}ATOM_SMU_INFO_V2_1;
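/*
 * Lookup sketch for the table above: scan the ucSclkEntryNum valid entries
 * for the first range whose ulMaxSclkFreq covers the target SCLK. The
 * function name is illustrative, not from this patch.
 */
static const ATOM_SCLK_FCW_RANGE_ENTRY_V1 *
find_sclk_fcw_range(const ATOM_SMU_INFO_V2_1 *smu_info, uint32_t sclk)
{
	int i;

	for (i = 0; i < smu_info->ucSclkEntryNum && i < 8; i++)
		if (sclk <= smu_info->asSclkFcwRangeEntry[i].ulMaxSclkFreq)
			return &smu_info->asSclkFcwRangeEntry[i];

	return NULL;	/* target above the last listed range */
}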
// GFX_InfoTable for Polaris10/Polaris11
typedef struct _ATOM_GFX_INFO_V2_1
{
ATOM_COMMON_TABLE_HEADER asHeader;
UCHAR GfxIpMinVer;
UCHAR GfxIpMajVer;
UCHAR max_shader_engines;
UCHAR max_tile_pipes;
UCHAR max_cu_per_sh;
UCHAR max_sh_per_se;
UCHAR max_backends_per_se;
UCHAR max_texture_channel_caches;
}ATOM_GFX_INFO_V2_1;
typedef struct _ATOM_POWER_SOURCE_OBJECT
{
UCHAR ucPwrSrcId; // Power source
@ -5765,14 +6272,6 @@ sExtDispConnInfo: Display connector information table provided t
**********************************************************************************************************************/
// this Table is used for Kaveri/Kabini APU
typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
{
ATOM_INTEGRATED_SYSTEM_INFO_V1_8 sIntegratedSysInfo; // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
ULONG ulPowerplayTable[128]; // Update comments here to link new powerplay table definition structure
}ATOM_FUSION_SYSTEM_INFO_V2;
typedef struct _ATOM_I2C_REG_INFO
{
UCHAR ucI2cRegIndex;
@ -5859,7 +6358,50 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9
#define EDP_VS_VARIABLE_PREM_MODE 5
// this IntegrateSystemInfoTable is used for Carrizo
// ulGPUCapInfo
#define SYS_INFO_V1_9_GPUCAPSINFO_DISABLE_AUX_MODE_DETECT 0x08
#define SYS_INFO_V1_9_GPUCAPSINFO_ENABEL_DFS_BYPASS 0x10
//ulGPUCapInfo[16]=1 indicates the SMC firmware is able to support the GNB fast resume function, so that the driver can call SMC to program most GNB registers during resume, from ML
#define SYS_INFO_V1_9_GPUCAPSINFO_GNB_FAST_RESUME_CAPABLE 0x00010000
//ulGPUCapInfo[18]=1 indicates the IOMMU is not available
#define SYS_INFO_V1_9_GPUCAPINFO_IOMMU_DISABLE 0x00040000
//ulGPUCapInfo[19]=1 indicates the MARC Aperture is opened.
#define SYS_INFO_V1_9_GPUCAPINFO_MARC_APERTURE_ENABLE 0x00080000
typedef struct _DPHY_TIMING_PARA
{
UCHAR ucProfileID; // SENSOR_PROFILES
ULONG ucPara;
} DPHY_TIMING_PARA;
typedef struct _DPHY_ELEC_PARA
{
USHORT usPara[3];
} DPHY_ELEC_PARA;
typedef struct _CAMERA_MODULE_INFO
{
UCHAR ucID; // 0: Rear, 1: Front right of user, 2: Front left of user
UCHAR strModuleName[8];
DPHY_TIMING_PARA asTimingPara[6]; // Exact number is still being estimated and confirmed with the sensor vendor
} CAMERA_MODULE_INFO;
typedef struct _FLASHLIGHT_INFO
{
UCHAR ucID; // 0: Rear, 1: Front
UCHAR strName[8];
} FLASHLIGHT_INFO;
typedef struct _CAMERA_DATA
{
ULONG ulVersionCode;
CAMERA_MODULE_INFO asCameraInfo[3]; // Assuming 3 camera sensors max
FLASHLIGHT_INFO asFlashInfo; // Assuming 1 flashlight max
DPHY_ELEC_PARA asDphyElecPara;
ULONG ulCrcVal; // CRC
}CAMERA_DATA;
typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_10
{
ATOM_COMMON_TABLE_HEADER sHeader;
@ -5883,7 +6425,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_10
USHORT usPanelRefreshRateRange;
UCHAR ucMemoryType;
UCHAR ucUMAChannelNumber;
UCHAR strVBIOSMsg[40];
ULONG ulMsgReserved[10];
ATOM_TDP_CONFIG asTdpConfig;
ULONG ulReserved[7];
ATOM_CLK_VOLT_CAPABILITY_V2 sDispClkVoltageMapping[8];
@ -5925,8 +6467,27 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_10
UCHAR ucEDPv1_4VSMode;
UCHAR ucReserved2;
ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
CAMERA_DATA asCameraInfo;
ULONG ulReserved8[29];
}ATOM_INTEGRATED_SYSTEM_INFO_V1_10;
// this Table is used for Kaveri/Kabini APU
typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
{
ATOM_INTEGRATED_SYSTEM_INFO_V1_8 sIntegratedSysInfo; // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
ULONG ulPowerplayTable[128]; // Update comments here to link new powerplay table definition structure
}ATOM_FUSION_SYSTEM_INFO_V2;
typedef struct _ATOM_FUSION_SYSTEM_INFO_V3
{
ATOM_INTEGRATED_SYSTEM_INFO_V1_10 sIntegratedSysInfo; // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
ULONG ulPowerplayTable[192]; // Reserve 768 bytes space for PowerPlayInfoTable
}ATOM_FUSION_SYSTEM_INFO_V3;
#define FUSION_V3_OFFSET_FROM_TOP_OF_FB 0x800
/**************************************************************************/
// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
//Memory SS Info Table
@ -6878,15 +7439,18 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE_V2_1
#define _32Mx16 0x32
#define _32Mx32 0x33
#define _32Mx128 0x35
#define _64Mx32 0x43
#define _64Mx8 0x41
#define _64Mx16 0x42
#define _64Mx32 0x43
#define _64Mx128 0x45
#define _128Mx8 0x51
#define _128Mx16 0x52
#define _128Mx32 0x53
#define _256Mx8 0x61
#define _256Mx16 0x62
#define _256Mx32 0x63
#define _512Mx8 0x71
#define _512Mx16 0x72
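/*
 * The values above follow a 0xHL pattern that can be decoded as
 * depth = (1 << (H + 2)) M-words and width = (4 << L) bits, e.g.
 * _64Mx32 = 0x43 -> 64M x 32 and _32Mx128 = 0x35 -> 32M x 128. This is an
 * inference from the listed constants, not a documented encoding.
 */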
#define SAMSUNG 0x1
@ -7407,6 +7971,17 @@ typedef struct _ATOM_MEMORY_TRAINING_INFO
}ATOM_MEMORY_TRAINING_INFO;
typedef struct _ATOM_MEMORY_TRAINING_INFO_V3_1
{
ATOM_COMMON_TABLE_HEADER sHeader;
ULONG ulMCUcodeVersion;
USHORT usMCIOInitLen; //len of ATOM_REG_INIT_SETTING array
USHORT usMCUcodeLen; //len of ATOM_MC_UCODE_DATA array
USHORT usMCIORegInitOffset; //offset of the ATOM_REG_INIT_SETTING array
USHORT usMCUcodeOffset; //offset of the MC uCode ULONG array.
}ATOM_MEMORY_TRAINING_INFO_V3_1;
typedef struct SW_I2C_CNTL_DATA_PARAMETERS
{
UCHAR ucControl;

View File

@ -26,6 +26,8 @@
#include "amd_shared.h"
struct cgs_device;
/**
* enum cgs_gpu_mem_type - GPU memory types
*/
@ -92,6 +94,7 @@ enum cgs_voltage_planes {
*/
enum cgs_ucode_id {
CGS_UCODE_ID_SMU = 0,
CGS_UCODE_ID_SMU_SK,
CGS_UCODE_ID_SDMA0,
CGS_UCODE_ID_SDMA1,
CGS_UCODE_ID_CP_CE,
@ -111,6 +114,7 @@ enum cgs_system_info_id {
CGS_SYSTEM_INFO_PCIE_MLW,
CGS_SYSTEM_INFO_CG_FLAGS,
CGS_SYSTEM_INFO_PG_FLAGS,
CGS_SYSTEM_INFO_GFX_CU_INFO,
CGS_SYSTEM_INFO_ID_MAXIMUM,
};
@ -223,7 +227,7 @@ struct cgs_acpi_method_info {
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_gpu_mem_info_t)(void *cgs_device, enum cgs_gpu_mem_type type,
typedef int (*cgs_gpu_mem_info_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
uint64_t *mc_start, uint64_t *mc_size,
uint64_t *mem_size);
@ -239,7 +243,7 @@ typedef int (*cgs_gpu_mem_info_t)(void *cgs_device, enum cgs_gpu_mem_type type,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_gmap_kmem_t)(void *cgs_device, void *kmem, uint64_t size,
typedef int (*cgs_gmap_kmem_t)(struct cgs_device *cgs_device, void *kmem, uint64_t size,
uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *kmem_handle, uint64_t *mcaddr);
@ -250,7 +254,7 @@ typedef int (*cgs_gmap_kmem_t)(void *cgs_device, void *kmem, uint64_t size,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_gunmap_kmem_t)(void *cgs_device, cgs_handle_t kmem_handle);
typedef int (*cgs_gunmap_kmem_t)(struct cgs_device *cgs_device, cgs_handle_t kmem_handle);
/**
* cgs_alloc_gpu_mem() - Allocate GPU memory
@ -279,7 +283,7 @@ typedef int (*cgs_gunmap_kmem_t)(void *cgs_device, cgs_handle_t kmem_handle);
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_alloc_gpu_mem_t)(void *cgs_device, enum cgs_gpu_mem_type type,
typedef int (*cgs_alloc_gpu_mem_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *handle);
@ -291,7 +295,7 @@ typedef int (*cgs_alloc_gpu_mem_t)(void *cgs_device, enum cgs_gpu_mem_type type,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_free_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
typedef int (*cgs_free_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
/**
* cgs_gmap_gpu_mem() - GPU-map GPU memory
@ -303,7 +307,7 @@ typedef int (*cgs_free_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_gmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
typedef int (*cgs_gmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle,
uint64_t *mcaddr);
/**
@ -315,7 +319,7 @@ typedef int (*cgs_gmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_gunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
typedef int (*cgs_gunmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
/**
* cgs_kmap_gpu_mem() - Kernel-map GPU memory
@ -326,7 +330,7 @@ typedef int (*cgs_gunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_kmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
typedef int (*cgs_kmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle,
void **map);
/**
@ -336,7 +340,7 @@ typedef int (*cgs_kmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_kunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
typedef int (*cgs_kunmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
/**
* cgs_read_register() - Read an MMIO register
@ -345,7 +349,7 @@ typedef int (*cgs_kunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
*
* Return: register value
*/
typedef uint32_t (*cgs_read_register_t)(void *cgs_device, unsigned offset);
typedef uint32_t (*cgs_read_register_t)(struct cgs_device *cgs_device, unsigned offset);
/**
* cgs_write_register() - Write an MMIO register
@ -353,7 +357,7 @@ typedef uint32_t (*cgs_read_register_t)(void *cgs_device, unsigned offset);
* @offset: register offset
* @value: register value
*/
typedef void (*cgs_write_register_t)(void *cgs_device, unsigned offset,
typedef void (*cgs_write_register_t)(struct cgs_device *cgs_device, unsigned offset,
uint32_t value);
/**
@ -363,7 +367,7 @@ typedef void (*cgs_write_register_t)(void *cgs_device, unsigned offset,
*
* Return: register value
*/
typedef uint32_t (*cgs_read_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
typedef uint32_t (*cgs_read_ind_register_t)(struct cgs_device *cgs_device, enum cgs_ind_reg space,
unsigned index);
/**
@ -372,7 +376,7 @@ typedef uint32_t (*cgs_read_ind_register_t)(void *cgs_device, enum cgs_ind_reg s
* @offset: register offset
* @value: register value
*/
typedef void (*cgs_write_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs_ind_reg space,
unsigned index, uint32_t value);
/**
@ -382,7 +386,7 @@ typedef void (*cgs_write_ind_register_t)(void *cgs_device, enum cgs_ind_reg spac
*
* Return: Value read
*/
typedef uint8_t (*cgs_read_pci_config_byte_t)(void *cgs_device, unsigned addr);
typedef uint8_t (*cgs_read_pci_config_byte_t)(struct cgs_device *cgs_device, unsigned addr);
/**
* cgs_read_pci_config_word() - Read word from PCI configuration space
@ -391,7 +395,7 @@ typedef uint8_t (*cgs_read_pci_config_byte_t)(void *cgs_device, unsigned addr);
*
* Return: Value read
*/
typedef uint16_t (*cgs_read_pci_config_word_t)(void *cgs_device, unsigned addr);
typedef uint16_t (*cgs_read_pci_config_word_t)(struct cgs_device *cgs_device, unsigned addr);
/**
* cgs_read_pci_config_dword() - Read dword from PCI configuration space
@ -400,7 +404,7 @@ typedef uint16_t (*cgs_read_pci_config_word_t)(void *cgs_device, unsigned addr);
*
* Return: Value read
*/
typedef uint32_t (*cgs_read_pci_config_dword_t)(void *cgs_device,
typedef uint32_t (*cgs_read_pci_config_dword_t)(struct cgs_device *cgs_device,
unsigned addr);
/**
@ -409,7 +413,7 @@ typedef uint32_t (*cgs_read_pci_config_dword_t)(void *cgs_device,
* @addr: address
* @value: value to write
*/
typedef void (*cgs_write_pci_config_byte_t)(void *cgs_device, unsigned addr,
typedef void (*cgs_write_pci_config_byte_t)(struct cgs_device *cgs_device, unsigned addr,
uint8_t value);
/**
@ -418,7 +422,7 @@ typedef void (*cgs_write_pci_config_byte_t)(void *cgs_device, unsigned addr,
* @addr: address, must be word-aligned
* @value: value to write
*/
typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
typedef void (*cgs_write_pci_config_word_t)(struct cgs_device *cgs_device, unsigned addr,
uint16_t value);
/**
@ -427,7 +431,7 @@ typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
* @addr: address, must be dword-aligned
* @value: value to write
*/
typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
typedef void (*cgs_write_pci_config_dword_t)(struct cgs_device *cgs_device, unsigned addr,
uint32_t value);
@ -441,7 +445,7 @@ typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_get_pci_resource_t)(void *cgs_device,
typedef int (*cgs_get_pci_resource_t)(struct cgs_device *cgs_device,
enum cgs_resource_type resource_type,
uint64_t size,
uint64_t offset,
@ -458,7 +462,7 @@ typedef int (*cgs_get_pci_resource_t)(void *cgs_device,
* Return: Pointer to start of the table, or NULL on failure
*/
typedef const void *(*cgs_atom_get_data_table_t)(
void *cgs_device, unsigned table,
struct cgs_device *cgs_device, unsigned table,
uint16_t *size, uint8_t *frev, uint8_t *crev);
/**
@ -470,7 +474,7 @@ typedef const void *(*cgs_atom_get_data_table_t)(
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_atom_get_cmd_table_revs_t)(void *cgs_device, unsigned table,
typedef int (*cgs_atom_get_cmd_table_revs_t)(struct cgs_device *cgs_device, unsigned table,
uint8_t *frev, uint8_t *crev);
/**
@ -481,7 +485,7 @@ typedef int (*cgs_atom_get_cmd_table_revs_t)(void *cgs_device, unsigned table,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_atom_exec_cmd_table_t)(void *cgs_device,
typedef int (*cgs_atom_exec_cmd_table_t)(struct cgs_device *cgs_device,
unsigned table, void *args);
/**
@ -491,7 +495,7 @@ typedef int (*cgs_atom_exec_cmd_table_t)(void *cgs_device,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_create_pm_request_t)(void *cgs_device, cgs_handle_t *request);
typedef int (*cgs_create_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t *request);
/**
* cgs_destroy_pm_request() - Destroy a power management request
@ -500,7 +504,7 @@ typedef int (*cgs_create_pm_request_t)(void *cgs_device, cgs_handle_t *request);
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_destroy_pm_request_t)(void *cgs_device, cgs_handle_t request);
typedef int (*cgs_destroy_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t request);
/**
* cgs_set_pm_request() - Activate or deactivate a PM request
@ -516,7 +520,7 @@ typedef int (*cgs_destroy_pm_request_t)(void *cgs_device, cgs_handle_t request);
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_set_pm_request_t)(void *cgs_device, cgs_handle_t request,
typedef int (*cgs_set_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t request,
int active);
/**
@ -528,7 +532,7 @@ typedef int (*cgs_set_pm_request_t)(void *cgs_device, cgs_handle_t request,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_pm_request_clock_t)(void *cgs_device, cgs_handle_t request,
typedef int (*cgs_pm_request_clock_t)(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_clock clock, unsigned freq);
/**
@ -540,7 +544,7 @@ typedef int (*cgs_pm_request_clock_t)(void *cgs_device, cgs_handle_t request,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_pm_request_engine_t)(void *cgs_device, cgs_handle_t request,
typedef int (*cgs_pm_request_engine_t)(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_engine engine, int powered);
/**
@ -551,7 +555,7 @@ typedef int (*cgs_pm_request_engine_t)(void *cgs_device, cgs_handle_t request,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_pm_query_clock_limits_t)(void *cgs_device,
typedef int (*cgs_pm_query_clock_limits_t)(struct cgs_device *cgs_device,
enum cgs_clock clock,
struct cgs_clock_limits *limits);
@ -563,7 +567,7 @@ typedef int (*cgs_pm_query_clock_limits_t)(void *cgs_device,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_set_camera_voltages_t)(void *cgs_device, uint32_t mask,
typedef int (*cgs_set_camera_voltages_t)(struct cgs_device *cgs_device, uint32_t mask,
const uint32_t *voltages);
/**
* cgs_get_firmware_info - Get the firmware information from core driver
@ -573,25 +577,25 @@ typedef int (*cgs_set_camera_voltages_t)(void *cgs_device, uint32_t mask,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_get_firmware_info)(void *cgs_device,
typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info);
typedef int(*cgs_set_powergating_state)(void *cgs_device,
typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
typedef int(*cgs_set_clockgating_state)(void *cgs_device,
typedef int(*cgs_set_clockgating_state)(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state);
typedef int(*cgs_get_active_displays_info)(
void *cgs_device,
struct cgs_device *cgs_device,
struct cgs_display_info *info);
typedef int (*cgs_notify_dpm_enabled)(void *cgs_device, bool enabled);
typedef int (*cgs_notify_dpm_enabled)(struct cgs_device *cgs_device, bool enabled);
typedef int (*cgs_call_acpi_method)(void *cgs_device,
typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
void *pinput, void *poutput,
@ -599,7 +603,7 @@ typedef int (*cgs_call_acpi_method)(void *cgs_device,
uint32_t input_size,
uint32_t output_size);
typedef int (*cgs_query_system_info)(void *cgs_device,
typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info);
struct cgs_ops {

View File

@ -66,7 +66,7 @@ typedef int (*cgs_irq_handler_func_t)(void *private_data,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned src_id,
typedef int (*cgs_add_irq_source_t)(struct cgs_device *cgs_device, unsigned src_id,
unsigned num_types,
cgs_irq_source_set_func_t set,
cgs_irq_handler_func_t handler,
@ -83,7 +83,7 @@ typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned src_id,
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
typedef int (*cgs_irq_get_t)(struct cgs_device *cgs_device, unsigned src_id, unsigned type);
/**
* cgs_irq_put() - Indicate IRQ source is no longer needed
@ -98,7 +98,7 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
*
* Return: 0 on success, -errno otherwise
*/
typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
typedef int (*cgs_irq_put_t)(struct cgs_device *cgs_device, unsigned src_id, unsigned type);
struct cgs_os_ops {
/* IRQ handling */

View File

@ -37,6 +37,12 @@
return -EINVAL; \
} while (0)
#define PP_CHECK_HW(hwmgr) \
do { \
if ((hwmgr) == NULL || (hwmgr)->hwmgr_func == NULL) \
return -EINVAL; \
} while (0)
static int pp_early_init(void *handle)
{
return 0;
@ -54,8 +60,9 @@ static int pp_sw_init(void *handle)
pp_handle = (struct pp_instance *)handle;
hwmgr = pp_handle->hwmgr;
if (hwmgr == NULL || hwmgr->pptable_func == NULL ||
hwmgr->hwmgr_func == NULL ||
PP_CHECK_HW(hwmgr);
if (hwmgr->pptable_func == NULL ||
hwmgr->pptable_func->pptable_init == NULL ||
hwmgr->hwmgr_func->backend_init == NULL)
return -EINVAL;
@ -66,9 +73,9 @@ static int pp_sw_init(void *handle)
ret = hwmgr->hwmgr_func->backend_init(hwmgr);
if (ret)
printk("amdgpu: powerplay initialization failed\n");
printk(KERN_ERR "amdgpu: powerplay initialization failed\n");
else
printk("amdgpu: powerplay initialized\n");
printk(KERN_INFO "amdgpu: powerplay initialized\n");
return ret;
}
@ -85,8 +92,9 @@ static int pp_sw_fini(void *handle)
pp_handle = (struct pp_instance *)handle;
hwmgr = pp_handle->hwmgr;
if (hwmgr != NULL || hwmgr->hwmgr_func != NULL ||
hwmgr->hwmgr_func->backend_fini != NULL)
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->backend_fini != NULL)
ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
return ret;
@ -172,21 +180,117 @@ static int pp_sw_reset(void *handle)
return 0;
}
static void pp_print_status(void *handle)
{
}
static int pp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct pp_hwmgr *hwmgr;
uint32_t msg_id, pp_state;
if (handle == NULL)
return -EINVAL;
hwmgr = ((struct pp_instance *)handle)->hwmgr;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
if (state == AMD_CG_STATE_UNGATE)
pp_state = 0;
else
pp_state = PP_STATE_CG | PP_STATE_LS;
/* Enable/disable GFX blocks clock gating through SMU */
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_CG,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_3D,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_RLC,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_CP,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_MG,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
/* Enable/disable System blocks clock gating through SMU */
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_MC,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_ROM,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_DRM,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_HDP,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_SDMA,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
return 0;
}
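/*
 * Hedged refactor sketch (function and table names are illustrative, not
 * from this patch): the update_clock_gatings() calls above differ only in
 * their (group, block) pair, so an equivalent table-driven form is
 * possible. The original issues the BIF message twice; that is preserved
 * here. ARRAY_SIZE is the usual kernel helper.
 */
static void pp_update_all_clock_gatings(struct pp_hwmgr *hwmgr, uint32_t pp_state)
{
	static const struct { uint32_t group, block; } targets[] = {
		{ PP_GROUP_GFX, PP_BLOCK_GFX_CG },
		{ PP_GROUP_GFX, PP_BLOCK_GFX_3D },
		{ PP_GROUP_GFX, PP_BLOCK_GFX_RLC },
		{ PP_GROUP_GFX, PP_BLOCK_GFX_CP },
		{ PP_GROUP_GFX, PP_BLOCK_GFX_MG },
		{ PP_GROUP_SYS, PP_BLOCK_SYS_BIF },
		{ PP_GROUP_SYS, PP_BLOCK_SYS_BIF },
		{ PP_GROUP_SYS, PP_BLOCK_SYS_MC },
		{ PP_GROUP_SYS, PP_BLOCK_SYS_ROM },
		{ PP_GROUP_SYS, PP_BLOCK_SYS_DRM },
		{ PP_GROUP_SYS, PP_BLOCK_SYS_HDP },
		{ PP_GROUP_SYS, PP_BLOCK_SYS_SDMA },
	};
	uint32_t msg_id;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(targets); i++) {
		msg_id = PP_CG_MSG_ID(targets[i].group, targets[i].block,
				      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
				      pp_state);
		hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
	}
}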
static int pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct pp_hwmgr *hwmgr;
if (handle == NULL)
return -EINVAL;
hwmgr = ((struct pp_instance *)handle)->hwmgr;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
/* Enable/disable GFX per cu powergating through SMU */
return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
state == AMD_PG_STATE_GATE ? true : false);
}
static int pp_suspend(void *handle)
@ -247,7 +351,6 @@ const struct amd_ip_funcs pp_ip_funcs = {
.is_idle = pp_is_idle,
.wait_for_idle = pp_wait_for_idle,
.soft_reset = pp_sw_reset,
.print_status = pp_print_status,
.set_clockgating_state = pp_set_clockgating_state,
.set_powergating_state = pp_set_powergating_state,
};
@ -275,9 +378,12 @@ static int pp_dpm_force_performance_level(void *handle,
hwmgr = pp_handle->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->force_dpm_level == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
@ -309,9 +415,12 @@ static int pp_dpm_get_sclk(void *handle, bool low)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->get_sclk == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->get_sclk == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
}
@ -325,9 +434,12 @@ static int pp_dpm_get_mclk(void *handle, bool low)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->get_mclk == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->get_mclk == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
}
@ -341,9 +453,12 @@ static int pp_dpm_powergate_vce(void *handle, bool gate)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->powergate_vce == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->powergate_vce == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
}
@ -357,9 +472,12 @@ static int pp_dpm_powergate_uvd(void *handle, bool gate)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->powergate_uvd == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
}
@ -455,10 +573,14 @@ pp_debugfs_print_current_performance_level(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->print_current_perforce_level == NULL)
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL)
return;
if (hwmgr->hwmgr_func->print_current_perforce_level == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return;
}
hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m);
}
@ -471,9 +593,12 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->set_fan_control_mode == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
}
@ -487,9 +612,12 @@ static int pp_dpm_get_fan_control_mode(void *handle)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->get_fan_control_mode == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
}
@ -503,9 +631,12 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->set_fan_speed_percent == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
}
@ -519,9 +650,12 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->get_fan_speed_percent == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
}
@ -535,9 +669,12 @@ static int pp_dpm_get_temperature(void *handle)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->get_temperature == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->get_temperature == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->get_temperature(hwmgr);
}
@ -591,9 +728,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->get_pp_table == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->get_pp_table == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->get_pp_table(hwmgr, table);
}
@ -607,15 +747,18 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->set_pp_table == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->set_pp_table == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
}
static int pp_dpm_force_clock_level(void *handle,
enum pp_clock_type type, int level)
enum pp_clock_type type, uint32_t mask)
{
struct pp_hwmgr *hwmgr;
@ -624,11 +767,14 @@ static int pp_dpm_force_clock_level(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->force_clock_level == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, level);
if (hwmgr->hwmgr_func->force_clock_level == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}
static int pp_dpm_print_clock_levels(void *handle,
@ -641,10 +787,12 @@ static int pp_dpm_print_clock_levels(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
hwmgr->hwmgr_func->print_clock_levels == NULL)
return -EINVAL;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
}
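
The pattern repeated throughout this file: the old combined NULL check returning -EINVAL is split into PP_CHECK_HW(hwmgr) plus a per-callback test that logs "%s was not implemented" and returns 0, so an hwmgr that merely lacks an optional callback no longer fails the whole request. PP_CHECK_HW() itself is defined in the powerplay headers rather than in this diff; a minimal sketch of the assumed expansion:

/* Sketch only -- the real definition lives in hwmgr.h, outside this diff. */
#define PP_CHECK_HW(hwmgr)						\
	do {								\
		if ((hwmgr) == NULL || (hwmgr)->hwmgr_func == NULL)	\
			return -EINVAL;					\
	} while (0)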

View File

@ -24,7 +24,7 @@
#include "eventactionchains.h"
#include "eventsubchains.h"
static const pem_event_action *initialize_event[] = {
static const pem_event_action * const initialize_event[] = {
block_adjust_power_state_tasks,
power_budget_tasks,
system_config_tasks,
@ -45,7 +45,7 @@ const struct action_chain initialize_action_chain = {
initialize_event
};
static const pem_event_action *uninitialize_event[] = {
static const pem_event_action * const uninitialize_event[] = {
ungate_all_display_phys_tasks,
uninitialize_display_phy_access_tasks,
disable_gfx_voltage_island_power_gating_tasks,
@ -64,7 +64,7 @@ const struct action_chain uninitialize_action_chain = {
uninitialize_event
};
static const pem_event_action *power_source_change_event_pp_enabled[] = {
static const pem_event_action * const power_source_change_event_pp_enabled[] = {
set_power_source_tasks,
set_power_saving_state_tasks,
adjust_power_state_tasks,
@ -79,7 +79,7 @@ const struct action_chain power_source_change_action_chain_pp_enabled = {
power_source_change_event_pp_enabled
};
static const pem_event_action *power_source_change_event_pp_disabled[] = {
static const pem_event_action * const power_source_change_event_pp_disabled[] = {
set_power_source_tasks,
set_nbmcu_state_tasks,
NULL
@ -90,7 +90,7 @@ const struct action_chain power_source_changes_action_chain_pp_disabled = {
power_source_change_event_pp_disabled
};
static const pem_event_action *power_source_change_event_hardware_dc[] = {
static const pem_event_action * const power_source_change_event_hardware_dc[] = {
set_power_source_tasks,
set_power_saving_state_tasks,
adjust_power_state_tasks,
@ -106,7 +106,7 @@ const struct action_chain power_source_change_action_chain_hardware_dc = {
power_source_change_event_hardware_dc
};
static const pem_event_action *suspend_event[] = {
static const pem_event_action * const suspend_event[] = {
reset_display_phy_access_tasks,
unregister_interrupt_tasks,
disable_gfx_voltage_island_power_gating_tasks,
@ -130,7 +130,7 @@ const struct action_chain suspend_action_chain = {
suspend_event
};
static const pem_event_action *resume_event[] = {
static const pem_event_action * const resume_event[] = {
unblock_hw_access_tasks,
resume_connected_standby_tasks,
notify_smu_resume_tasks,
@ -164,7 +164,7 @@ const struct action_chain resume_action_chain = {
resume_event
};
static const pem_event_action *complete_init_event[] = {
static const pem_event_action * const complete_init_event[] = {
unblock_adjust_power_state_tasks,
adjust_power_state_tasks,
enable_gfx_clock_gating_tasks,
@ -178,7 +178,7 @@ const struct action_chain complete_init_action_chain = {
complete_init_event
};
static const pem_event_action *enable_gfx_clock_gating_event[] = {
static const pem_event_action * const enable_gfx_clock_gating_event[] = {
enable_gfx_clock_gating_tasks,
NULL
};
@ -188,7 +188,7 @@ const struct action_chain enable_gfx_clock_gating_action_chain = {
enable_gfx_clock_gating_event
};
static const pem_event_action *disable_gfx_clock_gating_event[] = {
static const pem_event_action * const disable_gfx_clock_gating_event[] = {
disable_gfx_clock_gating_tasks,
NULL
};
@ -198,7 +198,7 @@ const struct action_chain disable_gfx_clock_gating_action_chain = {
disable_gfx_clock_gating_event
};
static const pem_event_action *enable_cgpg_event[] = {
static const pem_event_action * const enable_cgpg_event[] = {
enable_cgpg_tasks,
NULL
};
@ -208,7 +208,7 @@ const struct action_chain enable_cgpg_action_chain = {
enable_cgpg_event
};
static const pem_event_action *disable_cgpg_event[] = {
static const pem_event_action * const disable_cgpg_event[] = {
disable_cgpg_tasks,
NULL
};
@ -221,7 +221,7 @@ const struct action_chain disable_cgpg_action_chain = {
/* Enable user _2d performance and activate */
static const pem_event_action *enable_user_state_event[] = {
static const pem_event_action * const enable_user_state_event[] = {
create_new_user_performance_state_tasks,
adjust_power_state_tasks,
NULL
@ -232,7 +232,7 @@ const struct action_chain enable_user_state_action_chain = {
enable_user_state_event
};
static const pem_event_action *enable_user_2d_performance_event[] = {
static const pem_event_action * const enable_user_2d_performance_event[] = {
enable_user_2d_performance_tasks,
add_user_2d_performance_state_tasks,
set_performance_state_tasks,
@ -247,7 +247,7 @@ const struct action_chain enable_user_2d_performance_action_chain = {
};
static const pem_event_action *disable_user_2d_performance_event[] = {
static const pem_event_action * const disable_user_2d_performance_event[] = {
disable_user_2d_performance_tasks,
delete_user_2d_performance_state_tasks,
NULL
@ -259,7 +259,7 @@ const struct action_chain disable_user_2d_performance_action_chain = {
};
static const pem_event_action *display_config_change_event[] = {
static const pem_event_action * const display_config_change_event[] = {
/* countDisplayConfigurationChangeEventTasks, */
unblock_adjust_power_state_tasks,
set_cpu_power_state,
@ -278,7 +278,7 @@ const struct action_chain display_config_change_action_chain = {
display_config_change_event
};
static const pem_event_action *readjust_power_state_event[] = {
static const pem_event_action * const readjust_power_state_event[] = {
adjust_power_state_tasks,
NULL
};
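
Every table in this file gains a second const: the handlers pointed to were already const, but "* const" makes the array slots immutable too, so the whole chain table can live in read-only data. A standalone illustration of the difference (not driver code):

/* Compile with any C compiler; pem_event_action stands in for the real type. */
typedef int pem_event_action;

static const pem_event_action task_a = 1;
static const pem_event_action task_b = 2;

/* Pointees are const, but the slots can still be reassigned at runtime: */
static const pem_event_action *writable_table[] = { &task_a, &task_b };

/* Pointees and slots are both const; the table is fully read-only: */
static const pem_event_action * const readonly_table[] = { &task_a, &task_b };

void demo(void)
{
	writable_table[0] = &task_b;	   /* compiles */
	/* readonly_table[0] = &task_b; */ /* error: assignment of read-only location */
	(void)readonly_table;		   /* silence the unused-variable warning */
}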

View File

@ -62,7 +62,7 @@ int pem_init_event_action_chains(struct pp_eventmgr *eventmgr)
int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data)
{
const pem_event_action **paction_chain;
const pem_event_action * const *paction_chain;
const pem_event_action *psub_chain;
int tmp_result = 0;
int result = 0;

View File

@ -8,7 +8,9 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
tonga_processpptables.o ppatomctrl.o \
tonga_hwmgr.o pppcielanes.o tonga_thermal.o\
fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
fiji_clockpowergating.o fiji_thermal.o
fiji_clockpowergating.o fiji_thermal.o \
polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \
polaris10_clockpowergating.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))

View File

@ -237,7 +237,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
}
static struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
static const struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
/*we don't need an exit table here, because there is only D3 cold on Kv*/
{ phm_cf_want_uvd_power_gating, cz_tf_uvd_power_gating_initialize },
{ phm_cf_want_vce_power_gating, cz_tf_vce_power_gating_initialize },
@ -245,7 +245,7 @@ static struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
{ NULL, NULL }
};
struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
0,
PHM_MasterTableFlag_None,
cz_enable_clock_power_gatings_list

View File

@ -28,8 +28,7 @@
#include "pp_asicblocks.h"
extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
extern struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
extern struct phm_master_table_header cz_phm_disable_clock_power_gatings_master;
extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
extern int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
extern int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);

View File

@ -915,7 +915,7 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
return 0;
}
static struct phm_master_table_item cz_set_power_state_list[] = {
static const struct phm_master_table_item cz_set_power_state_list[] = {
{NULL, cz_tf_update_sclk_limit},
{NULL, cz_tf_set_deep_sleep_sclk_threshold},
{NULL, cz_tf_set_watermark_threshold},
@ -925,13 +925,13 @@ static struct phm_master_table_item cz_set_power_state_list[] = {
{NULL, NULL}
};
static struct phm_master_table_header cz_set_power_state_master = {
static const struct phm_master_table_header cz_set_power_state_master = {
0,
PHM_MasterTableFlag_None,
cz_set_power_state_list
};
static struct phm_master_table_item cz_setup_asic_list[] = {
static const struct phm_master_table_item cz_setup_asic_list[] = {
{NULL, cz_tf_reset_active_process_mask},
{NULL, cz_tf_upload_pptable_to_smu},
{NULL, cz_tf_init_sclk_limit},
@ -943,7 +943,7 @@ static struct phm_master_table_item cz_setup_asic_list[] = {
{NULL, NULL}
};
static struct phm_master_table_header cz_setup_asic_master = {
static const struct phm_master_table_header cz_setup_asic_master = {
0,
PHM_MasterTableFlag_None,
cz_setup_asic_list
@ -984,14 +984,14 @@ static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
return 0;
}
static struct phm_master_table_item cz_power_down_asic_list[] = {
static const struct phm_master_table_item cz_power_down_asic_list[] = {
{NULL, cz_tf_power_up_display_clock_sys_pll},
{NULL, cz_tf_clear_nb_dpm_flag},
{NULL, cz_tf_reset_cc6_data},
{NULL, NULL}
};
static struct phm_master_table_header cz_power_down_asic_master = {
static const struct phm_master_table_header cz_power_down_asic_master = {
0,
PHM_MasterTableFlag_None,
cz_power_down_asic_list
@ -1095,19 +1095,19 @@ static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr,
return 0;
}
static struct phm_master_table_item cz_disable_dpm_list[] = {
static const struct phm_master_table_item cz_disable_dpm_list[] = {
{ NULL, cz_tf_check_for_dpm_enabled},
{NULL, NULL},
};
static struct phm_master_table_header cz_disable_dpm_master = {
static const struct phm_master_table_header cz_disable_dpm_master = {
0,
PHM_MasterTableFlag_None,
cz_disable_dpm_list
};
static struct phm_master_table_item cz_enable_dpm_list[] = {
static const struct phm_master_table_item cz_enable_dpm_list[] = {
{ NULL, cz_tf_check_for_dpm_disabled },
{ NULL, cz_tf_program_voting_clients },
{ NULL, cz_tf_start_dpm},
@ -1117,7 +1117,7 @@ static struct phm_master_table_item cz_enable_dpm_list[] = {
{NULL, NULL},
};
static struct phm_master_table_header cz_enable_dpm_master = {
static const struct phm_master_table_header cz_enable_dpm_master = {
0,
PHM_MasterTableFlag_None,
cz_enable_dpm_list
@ -1729,7 +1729,7 @@ static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
}
static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, int level)
enum pp_clock_type type, uint32_t mask)
{
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
return -EINVAL;
@ -1738,10 +1738,10 @@ static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
case PP_SCLK:
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMin,
(1 << level));
mask);
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMax,
(1 << level));
mask);
break;
default:
break;
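
The force_clock_level interface change from "int level" to "uint32_t mask" recurs in every hwmgr below: instead of a single level index expanded to (1 << level), callers now pass a bitmask of allowed DPM levels, so several levels can be pinned in one call. A hedged sketch of building such a mask; levels_to_mask() is an illustrative helper, not a driver function:

#include <stdint.h>
#include <stdio.h>

static uint32_t levels_to_mask(const int *levels, int n)
{
	uint32_t mask = 0;

	while (n--)
		mask |= 1u << levels[n];
	return mask;
}

int main(void)
{
	int levels[] = { 2, 3, 4 };

	/* The old API could express only one level, e.g. (1 << 2) == 0x4;
	 * the mask form pins levels 2..4 together: */
	printf("mask = 0x%x\n", levels_to_mask(levels, 3)); /* mask = 0x1c */
	return 0;
}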

View File

@ -47,10 +47,17 @@ int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
data->uvd_power_gated = bgate;
if (bgate)
if (bgate) {
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
fiji_update_uvd_dpm(hwmgr, true);
else
} else {
fiji_update_uvd_dpm(hwmgr, false);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
}
return 0;
}

View File

@ -95,23 +95,23 @@ enum DPM_EVENT_SRC {
/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
* not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
*/
uint16_t fiji_clock_stretcher_lookup_table[2][4] = { {600, 1050, 3, 0},
{600, 1050, 6, 1} };
static const uint16_t fiji_clock_stretcher_lookup_table[2][4] =
{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
/* [FF, SS] type, [] 4 voltage ranges, and
* [Floor Freq, Boundary Freq, VID min , VID max]
*/
uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
{ {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
* (coming from PWR_CKS_CNTL.stretch_amount reg spec)
*/
uint8_t fiji_clock_stretch_amount_conversion[2][6] = { {0, 1, 3, 2, 4, 5},
{0, 2, 4, 5, 6, 5} };
static const uint8_t fiji_clock_stretch_amount_conversion[2][6] =
{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
struct fiji_power_state *cast_phw_fiji_power_state(
struct pp_hw_power_state *hw_ps)
@ -579,6 +579,18 @@ static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
return 0;
}
static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
if (data->soft_pp_table) {
kfree(data->soft_pp_table);
data->soft_pp_table = NULL;
}
return phm_hwmgr_backend_fini(hwmgr);
}
static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@ -734,7 +746,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
data->pcie_lane_cap = (uint32_t)sys_info.value;
} else {
/* Ignore return value in here, we are cleaning up a mess. */
tonga_hwmgr_backend_fini(hwmgr);
fiji_hwmgr_backend_fini(hwmgr);
}
return 0;
@ -1885,6 +1897,23 @@ static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
return 0;
}
static uint8_t fiji_get_sleep_divider_id_from_clock(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t clock_insr)
{
uint8_t i;
uint32_t temp;
uint32_t min = clock_insr > 2500 ? clock_insr : 2500;
PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
temp = clock / (1UL << i);
if (temp >= min || i == 0)
break;
}
return i;
}
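
fiji_get_sleep_divider_id_from_clock() searches from the largest divider down and returns the deepest power-of-two divider that keeps the engine clock at or above the stutter floor (2500 in 10 kHz units, i.e. 25 MHz, or the DAL-supplied minimum if that is higher). A standalone check of the search, assuming FIJI_MAX_DEEPSLEEP_DIVIDER_ID is 5 like the POLARIS10_* constant added later in this series:

#include <stdint.h>
#include <stdio.h>

#define MAX_DEEPSLEEP_DIVIDER_ID 5	/* assumed value of the FIJI_* constant */

static uint8_t sleep_divider_id(uint32_t clock, uint32_t clock_insr)
{
	uint32_t min = clock_insr > 2500 ? clock_insr : 2500;
	uint8_t i;

	for (i = MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		if (clock / (1UL << i) >= min || i == 0)
			break;
	}
	return i;
}

int main(void)
{
	/* 300 MHz in 10 kHz units: 30000 >> 5 = 937 and >> 4 = 1875 fall below
	 * the 2500 floor, while >> 3 = 3750 stays above it. */
	printf("%u\n", sleep_divider_id(30000, 0)); /* prints 3 */
	return 0;
}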
/**
* Populates single SMC SCLK structure using the provided engine clock
*
@ -1928,17 +1957,13 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
threshold = clock * data->fast_watermark_threshold / 100;
/*
* TODO: get minimum clocks from dal configaration
* PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks);
*/
/* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */
/* get level->DeepSleepDivId
if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
{
level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
} */
data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(hwmgr, clock,
hwmgr->display_config.min_core_set_clock_in_sr);
/* Default to slow, highest DPM level will be
* set to PPSMC_DISPLAY_WATERMARK_LOW later.
@ -3364,7 +3389,7 @@ static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
DPM_EVENT_SRC, src);
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
THERMAL_PROTECTION_DIS,
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ThermalController));
} else
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
@ -4066,7 +4091,6 @@ static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
uint32_t mclk = fiji_ps->performance_levels
[fiji_ps->performance_level_count - 1].memory_clock;
struct PP_Clocks min_clocks = {0};
uint32_t i;
struct cgs_display_info info = {0};
@ -4080,10 +4104,8 @@ static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
if (i >= sclk_table->count)
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
else {
/* TODO: Check SCLK in DAL's minimum clocks
* in case DeepSleep divider update is required.
*/
if(data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR)
if(data->display_timing.min_clock_in_sr !=
hwmgr->display_config.min_core_set_clock_in_sr)
data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
}
@ -5086,24 +5108,40 @@ static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
*table = (char *)&data->smc_state_table;
if (!data->soft_pp_table) {
data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
if (!data->soft_pp_table)
return -ENOMEM;
memcpy(data->soft_pp_table, hwmgr->soft_pp_table,
hwmgr->soft_pp_table_size);
}
return sizeof(struct SMU73_Discrete_DpmTable);
*table = (char *)&data->soft_pp_table;
return hwmgr->soft_pp_table_size;
}
static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
void *table = (void *)&data->smc_state_table;
if (!data->soft_pp_table) {
data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
if (!data->soft_pp_table)
return -ENOMEM;
}
memcpy(table, buf, size);
memcpy(data->soft_pp_table, buf, size);
hwmgr->soft_pp_table = data->soft_pp_table;
/* TODO: re-init powerplay to implement modified pptable */
return 0;
}
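
fiji_get_pp_table()/fiji_set_pp_table() now operate on a lazily allocated kernel copy (data->soft_pp_table) instead of exposing smc_state_table, and get_pp_table reports hwmgr->soft_pp_table_size rather than a struct size. From userspace the table round-trips through the pp_table attribute amdgpu exposes in sysfs; a hedged sketch of reading it back (the exact path depends on the card index):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Path is an assumption; substitute the right cardN for your system. */
	const char *path = "/sys/class/drm/card0/device/pp_table";
	FILE *f = fopen(path, "rb");
	long size;
	char *buf;

	if (!f)
		return 1;
	fseek(f, 0, SEEK_END);
	size = ftell(f);
	rewind(f);
	buf = malloc(size);
	if (buf && fread(buf, 1, size, f) == (size_t)size)
		printf("soft pptable: %ld bytes\n", size);
	free(buf);
	fclose(f);
	return 0;
}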
static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, int level)
enum pp_clock_type type, uint32_t mask)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@ -5115,20 +5153,30 @@ static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
if (!data->sclk_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
(1 << level));
data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
break;
case PP_MCLK:
if (!data->mclk_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
(1 << level));
data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
break;
case PP_PCIE:
{
uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
uint32_t level = 0;
while (tmp >>= 1)
level++;
if (!data->pcie_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
(1 << level));
level);
break;
}
default:
break;
}
@ -5252,19 +5300,19 @@ bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *h
if (data->display_timing.num_existing_displays != info.display_count)
is_update_required = true;
/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
if(hwmgr->display_config.min_core_set_clock_in_sr != data->display_timing.min_clock_in_sr)
is_update_required = true;
*/
}
return is_update_required;
}
static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.backend_init = &fiji_hwmgr_backend_init,
.backend_fini = &tonga_hwmgr_backend_fini,
.backend_fini = &fiji_hwmgr_backend_fini,
.asic_setup = &fiji_setup_asic_task,
.dynamic_state_management_enable = &fiji_enable_dpm_tasks,
.force_dpm_level = &fiji_dpm_force_dpm_level,

View File

@ -263,7 +263,7 @@ struct fiji_hwmgr {
bool enable_tdc_limit_feature;
bool enable_pkg_pwr_tracking_feature;
bool disable_uvd_power_tune_feature;
struct fiji_pt_defaults *power_tune_defaults;
const struct fiji_pt_defaults *power_tune_defaults;
struct SMU73_Discrete_PmFuses power_tune_table;
uint32_t dte_tj_offset;
uint32_t fast_watermark_threshold;
@ -302,6 +302,9 @@ struct fiji_hwmgr {
bool pg_acp_init;
bool frtc_enabled;
bool frtc_status_changed;
/* soft pptable for re-uploading into smu */
void *soft_pp_table;
};
/* To convert to Q8.8 format for firmware */
@ -338,7 +341,6 @@ enum Fiji_I2CLineID {
#define FIJI_UNUSED_GPIO_PIN 0x7F
extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
extern int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display);

View File

@ -32,7 +32,7 @@
#define VOLTAGE_SCALE 4
#define POWERTUNE_DEFAULT_SET_MAX 1
struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
/*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
{1, 0xF, 0xFD,
/* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
@ -143,7 +143,7 @@ static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda)
int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
struct fiji_pt_defaults *defaults = data->power_tune_defaults;
const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
SMU73_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@ -222,7 +222,7 @@ int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
struct fiji_pt_defaults *defaults = data->power_tune_defaults;
const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
@ -238,7 +238,7 @@ static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct fiji_pt_defaults *defaults = data->power_tune_defaults;
const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
/* TDC number of fraction bits are changed from 8 to 7
* for Fiji as requested by SMC team
@ -256,7 +256,7 @@ static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
struct fiji_pt_defaults *defaults = data->power_tune_defaults;
const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
uint32_t temp;
if (fiji_read_smc_sram_dword(hwmgr->smumgr,

View File

@ -221,8 +221,8 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (duty100 == 0)
return -EINVAL;
tmp64 = (uint64_t)speed * 100;
do_div(tmp64, duty100);
tmp64 = (uint64_t)speed * duty100;
do_div(tmp64, 100);
duty = (uint32_t)tmp64;
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
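
The duty-cycle fix inverts the scaling: the register wants duty = duty100 * percent / 100, but the old code computed percent * 100 / duty100, which shrinks the value whenever duty100 exceeds 100. Worked numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t duty100 = 255;	/* example full-scale register value */
	uint32_t speed = 40;	/* requested fan speed in percent */

	uint64_t old_duty = (uint64_t)speed * 100 / duty100; /* 15: far too slow */
	uint64_t new_duty = (uint64_t)speed * duty100 / 100; /* 102: 40% of 255  */

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old_duty, (unsigned long long)new_duty);
	return 0;
}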
@ -615,7 +615,7 @@ static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr,
return fiji_thermal_disable_alert(hwmgr);
}
static struct phm_master_table_item
static const struct phm_master_table_item
fiji_thermal_start_thermal_controller_master_list[] = {
{NULL, tf_fiji_thermal_initialize},
{NULL, tf_fiji_thermal_set_temperature_range},
@ -630,14 +630,14 @@ fiji_thermal_start_thermal_controller_master_list[] = {
{NULL, NULL}
};
static struct phm_master_table_header
static const struct phm_master_table_header
fiji_thermal_start_thermal_controller_master = {
0,
PHM_MasterTableFlag_None,
fiji_thermal_start_thermal_controller_master_list
};
static struct phm_master_table_item
static const struct phm_master_table_item
fiji_thermal_set_temperature_range_master_list[] = {
{NULL, tf_fiji_thermal_disable_alert},
{NULL, tf_fiji_thermal_set_temperature_range},
@ -645,7 +645,7 @@ fiji_thermal_set_temperature_range_master_list[] = {
{NULL, NULL}
};
struct phm_master_table_header
static const struct phm_master_table_header
fiji_thermal_set_temperature_range_master = {
0,
PHM_MasterTableFlag_None,

View File

@ -84,7 +84,7 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
}
int phm_construct_table(struct pp_hwmgr *hwmgr,
struct phm_master_table_header *master_table,
const struct phm_master_table_header *master_table,
struct phm_runtime_table_header *rt_table)
{
uint32_t function_count = 0;

View File

@ -34,6 +34,7 @@
extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
{
@ -67,6 +68,10 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
case CHIP_FIJI:
fiji_hwmgr_init(hwmgr);
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
polaris10_hwmgr_init(hwmgr);
break;
default:
return -EINVAL;
}

View File

@ -92,6 +92,8 @@ typedef struct phm_ppt_v1_voltage_lookup_table phm_ppt_v1_voltage_lookup_table;
struct phm_ppt_v1_pcie_record {
uint8_t gen_speed;
uint8_t lane_width;
uint16_t usreserved;
uint32_t pcie_sclk;
};
typedef struct phm_ppt_v1_pcie_record phm_ppt_v1_pcie_record;

View File

@ -0,0 +1,430 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "polaris10_clockpowergating.h"
int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_UVDPowerOFF);
return 0;
}
int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDynamicPowerGating)) {
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_UVDPowerON, 1);
} else {
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_UVDPowerON, 0);
}
}
return 0;
}
int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_VCEPowerOFF);
return 0;
}
int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_VCEPowerON);
return 0;
}
int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
return smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_SAMPowerOFF);
return 0;
}
int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
return smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_SAMPowerON);
return 0;
}
int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = false;
data->vce_power_gated = false;
data->samu_power_gated = false;
polaris10_phm_powerup_uvd(hwmgr);
polaris10_phm_powerup_vce(hwmgr);
polaris10_phm_powerup_samu(hwmgr);
return 0;
}
int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
if (data->uvd_power_gated == bgate)
return 0;
data->uvd_power_gated = bgate;
if (bgate) {
polaris10_update_uvd_dpm(hwmgr, true);
polaris10_phm_powerdown_uvd(hwmgr);
} else {
polaris10_phm_powerup_uvd(hwmgr);
polaris10_update_uvd_dpm(hwmgr, false);
}
return 0;
}
int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
if (data->vce_power_gated == bgate)
return 0;
data->vce_power_gated = bgate;
if (bgate)
polaris10_phm_powerdown_vce(hwmgr);
else
polaris10_phm_powerup_vce(hwmgr);
return 0;
}
int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
if (data->samu_power_gated == bgate)
return 0;
data->samu_power_gated = bgate;
if (bgate) {
polaris10_update_samu_dpm(hwmgr, true);
polaris10_phm_powerdown_samu(hwmgr);
} else {
polaris10_phm_powerup_samu(hwmgr);
polaris10_update_samu_dpm(hwmgr, false);
}
return 0;
}
int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id)
{
PPSMC_Msg msg;
uint32_t value;
switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
case PP_GROUP_GFX:
switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
case PP_BLOCK_GFX_CG:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_GFX_CGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
? PPSMC_MSG_EnableClockGatingFeature
: PPSMC_MSG_DisableClockGatingFeature;
value = CG_GFX_CGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_GFX_3D:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_GFX_3DCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_GFX_3DLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_GFX_RLC:
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_GFX_RLC_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_GFX_CP:
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_GFX_CP_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_GFX_MG:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = (CG_CPF_MGCG_MASK | CG_RLC_MGCG_MASK |
CG_GFX_OTHERS_MGCG_MASK);
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
break;
default:
return -1;
}
break;
case PP_GROUP_SYS:
switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
case PP_BLOCK_SYS_BIF:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_BIF_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_BIF_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_SYS_MC:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_MC_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_MC_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_SYS_DRM:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_DRM_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_DRM_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_SYS_HDP:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_HDP_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_HDP_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_SYS_SDMA:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_SDMA_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_SDMA_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_SYS_ROM:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
PPSMC_MSG_EnableClockGatingFeature :
PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_ROM_MASK;
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
return -1;
}
break;
default:
return -1;
}
break;
default:
return -1;
}
return 0;
}
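
polaris10_phm_update_clock_gatings() unpacks one uint32_t: the group sits behind PP_GROUP_MASK/PP_GROUP_SHIFT, the block behind PP_BLOCK_MASK/PP_BLOCK_SHIFT, and the CG/LS support and state flags occupy the low bits. A hedged sketch of the inverse operation; MAKE_MSG_ID is a hypothetical helper (the driver packs msg_id with its own macros in the powerplay headers, which are not part of this diff):

/* Hypothetical packing helper, mirroring the unpacking logic above. */
#define MAKE_MSG_ID(group, block, flags) \
	(((group) << PP_GROUP_SHIFT) | ((block) << PP_BLOCK_SHIFT) | (flags))

/* Request coarse clock gating (CG) on the GFX CG block: */
uint32_t msg_id = MAKE_MSG_ID(PP_GROUP_GFX, PP_BLOCK_GFX_CG,
			      PP_STATE_SUPPORT_CG | PP_STATE_CG);
/* ...then: polaris10_phm_update_clock_gatings(hwmgr, &msg_id); */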
/* This function is for Polaris11 only for now,
* Powerplay will only control the static per CU Power Gating.
* Dynamic per CU Power Gating will be done in gfx.
*/
int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
{
struct cgs_system_info sys_info = {0};
uint32_t active_cus;
int result;
sys_info.size = sizeof(struct cgs_system_info);
sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
return -EINVAL;
else
active_cus = sys_info.value;
if (enable)
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus);
else
return smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_GFX_CU_PG_DISABLE);
}

View File

@ -0,0 +1,40 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _POLARIS10_CLOCK_POWER_GATING_H_
#define _POLARIS10_CLOCK_POWER_GATING_H_
#include "polaris10_hwmgr.h"
#include "pp_asicblocks.h"
int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id);
int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
#endif /* _POLARIS10_CLOCK_POWER_GATING_H_ */

View File

@ -0,0 +1,62 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef POLARIS10_DYN_DEFAULTS_H
#define POLARIS10_DYN_DEFAULTS_H
enum Polaris10dpm_TrendDetection {
Polaris10Adpm_TrendDetection_AUTO,
Polaris10Adpm_TrendDetection_UP,
Polaris10Adpm_TrendDetection_DOWN
};
typedef enum Polaris10dpm_TrendDetection Polaris10dpm_TrendDetection;
/* We need to fill in the default values */
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000
#define PPPOLARIS10_THERMALPROTECTCOUNTER_DFLT 0x200
#define PPPOLARIS10_STATICSCREENTHRESHOLDUNIT_DFLT 0
#define PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT 0x00C8
#define PPPOLARIS10_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
#define PPPOLARIS10_REFERENCEDIVIDER_DFLT 4
#define PPPOLARIS10_ULVVOLTAGECHANGEDELAY_DFLT 1687
#define PPPOLARIS10_CGULVPARAMETER_DFLT 0x00040035
#define PPPOLARIS10_CGULVCONTROL_DFLT 0x00007450
#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
#define PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT 10
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,357 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef POLARIS10_HWMGR_H
#define POLARIS10_HWMGR_H
#include "hwmgr.h"
#include "smu74.h"
#include "smu74_discrete.h"
#include "ppatomctrl.h"
#include "polaris10_ppsmc.h"
#include "polaris10_powertune.h"
#define POLARIS10_MAX_HARDWARE_POWERLEVELS 2
#define POLARIS10_VOLTAGE_CONTROL_NONE 0x0
#define POLARIS10_VOLTAGE_CONTROL_BY_GPIO 0x1
#define POLARIS10_VOLTAGE_CONTROL_BY_SVID2 0x2
#define POLARIS10_VOLTAGE_CONTROL_MERGED 0x3
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
#define DPMTABLE_UPDATE_SCLK 0x00000004
#define DPMTABLE_UPDATE_MCLK 0x00000008
struct polaris10_performance_level {
uint32_t memory_clock;
uint32_t engine_clock;
uint16_t pcie_gen;
uint16_t pcie_lane;
};
struct polaris10_uvd_clocks {
uint32_t vclk;
uint32_t dclk;
};
struct polaris10_vce_clocks {
uint32_t evclk;
uint32_t ecclk;
};
struct polaris10_power_state {
uint32_t magic;
struct polaris10_uvd_clocks uvd_clks;
struct polaris10_vce_clocks vce_clks;
uint32_t sam_clk;
uint16_t performance_level_count;
bool dc_compatible;
uint32_t sclk_threshold;
struct polaris10_performance_level performance_levels[POLARIS10_MAX_HARDWARE_POWERLEVELS];
};
struct polaris10_dpm_level {
bool enabled;
uint32_t value;
uint32_t param1;
};
#define POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID 5
#define MAX_REGULAR_DPM_NUMBER 8
#define POLARIS10_MINIMUM_ENGINE_CLOCK 2500
struct polaris10_single_dpm_table {
uint32_t count;
struct polaris10_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
};
struct polaris10_dpm_table {
struct polaris10_single_dpm_table sclk_table;
struct polaris10_single_dpm_table mclk_table;
struct polaris10_single_dpm_table pcie_speed_table;
struct polaris10_single_dpm_table vddc_table;
struct polaris10_single_dpm_table vddci_table;
struct polaris10_single_dpm_table mvdd_table;
};
struct polaris10_clock_registers {
uint32_t vCG_SPLL_FUNC_CNTL;
uint32_t vCG_SPLL_FUNC_CNTL_2;
uint32_t vCG_SPLL_FUNC_CNTL_3;
uint32_t vCG_SPLL_FUNC_CNTL_4;
uint32_t vCG_SPLL_SPREAD_SPECTRUM;
uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
uint32_t vDLL_CNTL;
uint32_t vMCLK_PWRMGT_CNTL;
uint32_t vMPLL_AD_FUNC_CNTL;
uint32_t vMPLL_DQ_FUNC_CNTL;
uint32_t vMPLL_FUNC_CNTL;
uint32_t vMPLL_FUNC_CNTL_1;
uint32_t vMPLL_FUNC_CNTL_2;
uint32_t vMPLL_SS1;
uint32_t vMPLL_SS2;
};
#define DISABLE_MC_LOADMICROCODE 1
#define DISABLE_MC_CFGPROGRAMMING 2
struct polaris10_voltage_smio_registers {
uint32_t vS0_VID_LOWER_SMIO_CNTL;
};
#define POLARIS10_MAX_LEAKAGE_COUNT 8
struct polaris10_leakage_voltage {
uint16_t count;
uint16_t leakage_id[POLARIS10_MAX_LEAKAGE_COUNT];
uint16_t actual_voltage[POLARIS10_MAX_LEAKAGE_COUNT];
};
struct polaris10_vbios_boot_state {
uint16_t mvdd_bootup_value;
uint16_t vddc_bootup_value;
uint16_t vddci_bootup_value;
uint32_t sclk_bootup_value;
uint32_t mclk_bootup_value;
uint16_t pcie_gen_bootup_value;
uint16_t pcie_lane_bootup_value;
};
/* Ultra Low Voltage parameter structure */
struct polaris10_ulv_parm {
bool ulv_supported;
uint32_t cg_ulv_parameter;
uint32_t ulv_volt_change_delay;
struct polaris10_performance_level ulv_power_level;
};
struct polaris10_display_timing {
uint32_t min_clock_in_sr;
uint32_t num_existing_displays;
};
struct polaris10_dpmlevel_enable_mask {
uint32_t uvd_dpm_enable_mask;
uint32_t vce_dpm_enable_mask;
uint32_t acp_dpm_enable_mask;
uint32_t samu_dpm_enable_mask;
uint32_t sclk_dpm_enable_mask;
uint32_t mclk_dpm_enable_mask;
uint32_t pcie_dpm_enable_mask;
};
struct polaris10_pcie_perf_range {
uint16_t max;
uint16_t min;
};
struct polaris10_range_table {
uint32_t trans_lower_frequency; /* in 10khz */
uint32_t trans_upper_frequency;
};
struct polaris10_hwmgr {
struct polaris10_dpm_table dpm_table;
struct polaris10_dpm_table golden_dpm_table;
SMU74_Discrete_DpmTable smc_state_table;
struct SMU74_Discrete_Ulv ulv_setting;
struct polaris10_range_table range_table[NUM_SCLK_RANGE];
uint32_t voting_rights_clients0;
uint32_t voting_rights_clients1;
uint32_t voting_rights_clients2;
uint32_t voting_rights_clients3;
uint32_t voting_rights_clients4;
uint32_t voting_rights_clients5;
uint32_t voting_rights_clients6;
uint32_t voting_rights_clients7;
uint32_t static_screen_threshold_unit;
uint32_t static_screen_threshold;
uint32_t voltage_control;
uint32_t vddc_vddci_delta;
uint32_t active_auto_throttle_sources;
struct polaris10_clock_registers clock_registers;
struct polaris10_voltage_smio_registers voltage_smio_registers;
bool is_memory_gddr5;
uint16_t acpi_vddc;
bool pspp_notify_required;
uint16_t force_pcie_gen;
uint16_t acpi_pcie_gen;
uint32_t pcie_gen_cap;
uint32_t pcie_lane_cap;
uint32_t pcie_spc_cap;
struct polaris10_leakage_voltage vddc_leakage;
struct polaris10_leakage_voltage Vddci_leakage;
uint32_t mvdd_control;
uint32_t vddc_mask_low;
uint32_t mvdd_mask_low;
uint16_t max_vddc_in_pptable;
uint16_t min_vddc_in_pptable;
uint16_t max_vddci_in_pptable;
uint16_t min_vddci_in_pptable;
uint32_t mclk_strobe_mode_threshold;
uint32_t mclk_stutter_mode_threshold;
uint32_t mclk_edc_enable_threshold;
uint32_t mclk_edcwr_enable_threshold;
bool is_uvd_enabled;
struct polaris10_vbios_boot_state vbios_boot_state;
bool pcie_performance_request;
bool battery_state;
bool is_tlu_enabled;
/* ---- SMC SRAM Address of firmware header tables ---- */
uint32_t sram_end;
uint32_t dpm_table_start;
uint32_t soft_regs_start;
uint32_t mc_reg_table_start;
uint32_t fan_table_start;
uint32_t arb_table_start;
/* ---- Stuff originally coming from Evergreen ---- */
uint32_t vddci_control;
struct pp_atomctrl_voltage_table vddc_voltage_table;
struct pp_atomctrl_voltage_table vddci_voltage_table;
struct pp_atomctrl_voltage_table mvdd_voltage_table;
uint32_t mgcg_cgtt_local2;
uint32_t mgcg_cgtt_local3;
uint32_t gpio_debug;
uint32_t mc_micro_code_feature;
uint32_t highest_mclk;
uint16_t acpi_vddci;
uint8_t mvdd_high_index;
uint8_t mvdd_low_index;
bool dll_default_on;
bool performance_request_registered;
/* ---- Low Power Features ---- */
struct polaris10_ulv_parm ulv;
/* ---- CAC Stuff ---- */
uint32_t cac_table_start;
bool cac_configuration_required;
bool driver_calculate_cac_leakage;
bool cac_enabled;
/* ---- DPM2 Parameters ---- */
uint32_t power_containment_features;
bool enable_dte_feature;
bool enable_tdc_limit_feature;
bool enable_pkg_pwr_tracking_feature;
bool disable_uvd_power_tune_feature;
const struct polaris10_pt_defaults *power_tune_defaults;
struct SMU74_Discrete_PmFuses power_tune_table;
uint32_t dte_tj_offset;
uint32_t fast_watermark_threshold;
/* ---- Phase Shedding ---- */
bool vddc_phase_shed_control;
/* ---- DI/DT ---- */
struct polaris10_display_timing display_timing;
uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
/* ---- Thermal Temperature Setting ---- */
struct polaris10_dpmlevel_enable_mask dpm_level_enable_mask;
uint32_t need_update_smu7_dpm_table;
uint32_t sclk_dpm_key_disabled;
uint32_t mclk_dpm_key_disabled;
uint32_t pcie_dpm_key_disabled;
uint32_t min_engine_clocks;
struct polaris10_pcie_perf_range pcie_gen_performance;
struct polaris10_pcie_perf_range pcie_lane_performance;
struct polaris10_pcie_perf_range pcie_gen_power_saving;
struct polaris10_pcie_perf_range pcie_lane_power_saving;
bool use_pcie_performance_levels;
bool use_pcie_power_saving_levels;
uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
uint32_t mclk_activity_target;
uint32_t mclk_dpm0_activity_target;
uint32_t low_sclk_interrupt_threshold;
uint32_t last_mclk_dpm_enable_mask;
bool uvd_enabled;
/* ---- Power Gating States ---- */
bool uvd_power_gated;
bool vce_power_gated;
bool samu_power_gated;
bool need_long_memory_training;
/* Application power optimization parameters */
bool update_up_hyst;
bool update_down_hyst;
uint32_t down_hyst;
uint32_t up_hyst;
uint32_t disable_dpm_mask;
bool apply_optimized_settings;
/* soft pptable for re-uploading into smu */
void *soft_pp_table;
};
/* To convert to Q8.8 format for firmware */
#define POLARIS10_Q88_FORMAT_CONVERSION_UNIT 256
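
Q8.8 stores a value as value * 256 truncated to an integer: eight integer bits and eight fractional bits. A quick check of the conversion unit defined above:

#include <stdint.h>
#include <stdio.h>

#define POLARIS10_Q88_FORMAT_CONVERSION_UNIT 256

int main(void)
{
	double fan_gain = 1.5;	/* arbitrary example value */
	uint16_t q88 = (uint16_t)(fan_gain * POLARIS10_Q88_FORMAT_CONVERSION_UNIT);

	printf("1.5 -> 0x%04x\n", q88);	/* 0x0180: integer 1, fraction 0x80/256 */
	return 0;
}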
enum Polaris10_I2CLineID {
Polaris10_I2CLineID_DDC1 = 0x90,
Polaris10_I2CLineID_DDC2 = 0x91,
Polaris10_I2CLineID_DDC3 = 0x92,
Polaris10_I2CLineID_DDC4 = 0x93,
Polaris10_I2CLineID_DDC5 = 0x94,
Polaris10_I2CLineID_DDC6 = 0x95,
Polaris10_I2CLineID_SCLSDA = 0x96,
Polaris10_I2CLineID_DDCVGA = 0x97
};
#define POLARIS10_I2C_DDC1DATA 0
#define POLARIS10_I2C_DDC1CLK 1
#define POLARIS10_I2C_DDC2DATA 2
#define POLARIS10_I2C_DDC2CLK 3
#define POLARIS10_I2C_DDC3DATA 4
#define POLARIS10_I2C_DDC3CLK 5
#define POLARIS10_I2C_SDA 40
#define POLARIS10_I2C_SCL 41
#define POLARIS10_I2C_DDC4DATA 65
#define POLARIS10_I2C_DDC4CLK 66
#define POLARIS10_I2C_DDC5DATA 0x48
#define POLARIS10_I2C_DDC5CLK 0x49
#define POLARIS10_I2C_DDC6DATA 0x4a
#define POLARIS10_I2C_DDC6CLK 0x4b
#define POLARIS10_I2C_DDCVGADATA 0x4c
#define POLARIS10_I2C_DDCVGACLK 0x4d
#define POLARIS10_UNUSED_GPIO_PIN 0x7F
int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
#endif

View File

@ -0,0 +1,398 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "hwmgr.h"
#include "smumgr.h"
#include "polaris10_hwmgr.h"
#include "polaris10_powertune.h"
#include "polaris10_smumgr.h"
#include "smu74_discrete.h"
#include "pp_debug.h"
#define VOLTAGE_SCALE 4
#define POWERTUNE_DEFAULT_SET_MAX 1
static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
* TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
{ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
};
void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *polaris10_hwmgr = (struct polaris10_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
if (table_info &&
table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
table_info->cac_dtp_table->usPowerTuneDataSetID)
polaris10_hwmgr->power_tune_defaults =
&polaris10_power_tune_data_set_array
[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
else
polaris10_hwmgr->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
}
static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
{
uint32_t tmp;
tmp = raw_setting * 4096 / 100;
return (uint16_t)tmp;
}
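
scale_fan_gain_settings() is a straight linear rescale from the pptable's 0-100 units onto the SMU's 0-4096 fixed-point scale. A standalone check:

#include <stdint.h>
#include <stdio.h>

static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
{
	return (uint16_t)((uint32_t)raw_setting * 4096 / 100);
}

int main(void)
{
	printf("%u %u %u\n",
	       scale_fan_gain_settings(25),	/* 1024 */
	       scale_fan_gain_settings(50),	/* 2048 */
	       scale_fan_gain_settings(100));	/* 4096 */
	return 0;
}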
int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
SMU74_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
struct pp_advance_fan_control_parameters *fan_table=
&hwmgr->thermal_controller.advanceFanControlParameters;
int i, j, k;
const uint16_t *pdef1;
const uint16_t *pdef2;
dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
"Target Operating Temp is out of Range!",
);
dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
cac_dtp_table->usTargetOperatingTemp * 256);
dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
cac_dtp_table->usTemperatureLimitHotspot * 256);
dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
scale_fan_gain_settings(fan_table->usFanGainEdge));
dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
scale_fan_gain_settings(fan_table->usFanGainHotspot));
pdef1 = defaults->BAPMTI_R;
pdef2 = defaults->BAPMTI_RC;
for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
for (j = 0; j < SMU74_DTE_SOURCES; j++) {
for (k = 0; k < SMU74_DTE_SINKS; k++) {
dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
pdef1++;
pdef2++;
}
}
}
return 0;
}
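The defaults store BAPMTI_R/BAPMTI_RC as flat arrays of SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS entries, so the triple loop above is a linear walk with two cursors that byte-swaps each entry for the SMC via PP_HOST_TO_SMC_US. An equivalent single-loop formulation (sketch):
for (unsigned int idx = 0;
     idx < SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS; idx++) {
	unsigned int i = idx / (SMU74_DTE_SOURCES * SMU74_DTE_SINKS);
	unsigned int j = (idx / SMU74_DTE_SINKS) % SMU74_DTE_SOURCES;
	unsigned int k = idx % SMU74_DTE_SINKS;
	dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(defaults->BAPMTI_R[idx]);
	dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(defaults->BAPMTI_RC[idx]);
}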
static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
data->power_tune_table.SviLoadLineTrimVddC = 3;
data->power_tune_table.SviLoadLineOffsetVddC = 0;
return 0;
}
static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
uint16_t tdc_limit;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
data->power_tune_table.TDC_VDDC_PkgLimit =
CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
return 0;
}
static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
uint32_t temp;
if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
fuse_table_offset +
offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
(uint32_t *)&temp, data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
return -EINVAL);
else {
data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
data->power_tune_table.LPMLTemperatureMin =
(uint8_t)((temp >> 16) & 0xff);
data->power_tune_table.LPMLTemperatureMax =
(uint8_t)((temp >> 8) & 0xff);
data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
}
return 0;
}
static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
{
int i;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 16; i++)
data->power_tune_table.LPMLTemperatureScaler[i] = 0;
return 0;
}
static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
return 0;
}
static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
{
int i;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 16; i++)
data->power_tune_table.GnbLPML[i] = 0;
return 0;
}
static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
return 0;
}
static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
data->power_tune_table.BapmVddCBaseLeakageHiSidd =
CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
data->power_tune_table.BapmVddCBaseLeakageLoSidd =
CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
return 0;
}
int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
uint32_t pm_fuse_table_offset;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU74_Firmware_Header, PmFuseTable),
&pm_fuse_table_offset, data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to get pm_fuse_table_offset Failed!",
return -EINVAL);
if (polaris10_populate_svi_load_line(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate SviLoadLine Failed!",
return -EINVAL);
if (polaris10_populate_tdc_limit(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate TDCLimit Failed!", return -EINVAL);
if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate TdcWaterfallCtl, "
"LPMLTemperature Min and Max Failed!",
return -EINVAL);
if (polaris10_populate_temperature_scaler(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate LPMLTemperatureScaler Failed!",
return -EINVAL);
if (polaris10_populate_fuzzy_fan(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate Fuzzy Fan Control parameters Failed!",
return -EINVAL);
if (polaris10_populate_gnb_lpml(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate GnbLPML Failed!",
return -EINVAL);
if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate GnbLPML Min and Max Vid Failed!",
return -EINVAL);
if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
"Sidd Failed!", return -EINVAL);
if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
(uint8_t *)&data->power_tune_table,
sizeof(struct SMU74_Discrete_PmFuses), data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to download PmFuseTable Failed!",
return -EINVAL);
}
return 0;
}
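Every step above funnels failures through PP_ASSERT_WITH_CODE(cond, msg, code) from pp_debug.h. Paraphrased as a sketch (not the verbatim macro), it logs the message and runs the recovery statement when the condition is false:
#define PP_ASSERT_WITH_CODE(cond, msg, code)	\
	do {					\
		if (!(cond)) {			\
			printk("%s\n", msg);	\
			code;			\
		}				\
	} while (0)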
int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC)) {
int smc_result;
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_EnableCac));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable CAC in SMC.", result = -1);
data->cac_enabled = (smc_result == 0);
}
return result;
}
int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_PkgPwrSetLimit, n);
return 0;
}
static int polaris10_set_overdrive_target_tdp(struct pp_hwmgr *hwmgr, uint32_t target_tdp)
{
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
}
int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
int smc_result;
int result = 0;
data->power_containment_features = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
if (data->enable_tdc_limit_feature) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_TDCLimitEnable));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable TDCLimit in SMC.", result = -1;);
if (0 == smc_result)
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_TDCLimit;
}
if (data->enable_pkg_pwr_tracking_feature) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable PkgPwrTracking in SMC.", result = -1;);
if (0 == smc_result) {
struct phm_cac_tdp_table *cac_table =
table_info->cac_dtp_table;
uint32_t default_limit =
(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_PkgPwrLimit;
if (polaris10_set_power_limit(hwmgr, default_limit))
printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
}
}
}
return result;
}
int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
int adjust_percent, target_tdp;
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
/* adjustment percentage has already been validated */
adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
(-1 * hwmgr->platform_descriptor.TDPAdjustment);
/* The SMC expects target_tdp as a 7-bit fraction in the DPM table,
 * but as an 8-bit fraction in messages.
 */
target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
result = polaris10_set_overdrive_target_tdp(hwmgr, (uint32_t)target_tdp);
}
return result;
}
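A worked example of the target TDP computation, using illustrative numbers (usTDP in watts, result in the 8.8 fixed point the comment above describes):
int adjust_percent = -10;	/* illustrative: 10% reduction, negative polarity */
int target_tdp = ((100 + adjust_percent) * (100 * 256)) / 100;
				/* usTDP = 100 -> 23040 == 90 * 256 */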

View File

@ -0,0 +1,70 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef POLARIS10_POWERTUNE_H
#define POLARIS10_POWERTUNE_H
enum polaris10_pt_config_reg_type {
POLARIS10_CONFIGREG_MMR = 0,
POLARIS10_CONFIGREG_SMC_IND,
POLARIS10_CONFIGREG_DIDT_IND,
POLARIS10_CONFIGREG_CACHE,
POLARIS10_CONFIGREG_MAX
};
/* PowerContainment Features */
#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
struct polaris10_pt_config_reg {
uint32_t offset;
uint32_t mask;
uint32_t shift;
uint32_t value;
enum polaris10_pt_config_reg_type type;
};
struct polaris10_pt_defaults {
uint8_t SviLoadLineEn;
uint8_t SviLoadLineVddC;
uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
uint8_t TDC_MAWt;
uint8_t TdcWaterfallCtl;
uint8_t DTEAmbientTempBase;
uint32_t DisplayCac;
uint32_t BAPM_TEMP_GRADIENT;
uint16_t BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
};
void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
#endif /* POLARIS10_POWERTUNE_H */

View File

@ -0,0 +1,711 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "polaris10_thermal.h"
#include "polaris10_hwmgr.h"
#include "polaris10_smumgr.h"
#include "polaris10_ppsmc.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info)
{
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
fan_speed_info->supports_percent_read = true;
fan_speed_info->supports_percent_write = true;
fan_speed_info->min_percent = 0;
fan_speed_info->max_percent = 100;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
fan_speed_info->supports_rpm_read = true;
fan_speed_info->supports_rpm_write = true;
fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
} else {
fan_speed_info->min_rpm = 0;
fan_speed_info->max_rpm = 0;
}
return 0;
}
int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
uint32_t duty100;
uint32_t duty;
uint64_t tmp64;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100);
duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_STATUS, FDO_PWM_DUTY);
if (duty100 == 0)
return -EINVAL;
tmp64 = (uint64_t)duty * 100;
do_div(tmp64, duty100);
*speed = (uint32_t)tmp64;
if (*speed > 100)
*speed = 100;
return 0;
}
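The percent read-back is a plain ratio of the duty register against the 100% reference, done in 64-bit arithmetic to avoid overflow. With illustrative register values:
uint64_t tmp64 = (uint64_t)128 * 100;	/* FDO_PWM_DUTY = 128 */
do_div(tmp64, 255);			/* FMAX_DUTY100 = 255 -> tmp64 == 50, i.e. 50% */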
int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
uint32_t tach_period;
uint32_t crystal_clock_freq;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0))
return 0;
tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_STATUS, TACH_PERIOD);
if (tach_period == 0)
return -EINVAL;
crystal_clock_freq = tonga_get_xclk(hwmgr);
*speed = 60 * crystal_clock_freq * 10000 / tach_period;
return 0;
}
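Note the read path computes speed = 60 * xclk * 10000 / tach_period with no divide-by-eight, while polaris10_fan_ctrl_set_fan_speed_rpm() further down programs tach_period = 60 * xclk * 10000 / (8 * speed); presumably one side folds in a pulses-per-revolution factor. A round-trip check under exactly these two formulas (illustrative values):
uint32_t xclk = 2700;					/* illustrative reference clock */
uint32_t tach_period = 60 * xclk * 10000 / (8 * 1500);	/* set path: 1500 RPM -> 135000 */
uint32_t rpm = 60 * xclk * 10000 / tach_period;		/* get path -> 12000 == 8 * 1500 */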
/**
* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
* @param hwmgr the address of the powerplay hardware manager.
* @param mode the fan control mode: 0 default, 1 by percent, 5 by RPM
* @exception Should always succeed.
*/
int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
if (hwmgr->fan_ctrl_is_in_default_mode) {
hwmgr->fan_ctrl_default_mode =
PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, FDO_PWM_MODE);
hwmgr->tmin =
PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, TMIN);
hwmgr->fan_ctrl_is_in_default_mode = false;
}
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, TMIN, 0);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, FDO_PWM_MODE, mode);
return 0;
}
/**
* Reset Fan Speed Control to default mode.
* @param hwmgr the address of the powerplay hardware manager.
* @exception Should always succeed.
*/
int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->fan_ctrl_is_in_default_mode) {
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, TMIN, hwmgr->tmin);
hwmgr->fan_ctrl_is_in_default_mode = true;
}
return 0;
}
int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
int result;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_FanSpeedInTableIsRPM))
hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanRPM);
else
hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanPWM);
} else {
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
}
if (!result && hwmgr->thermal_controller.
advanceFanControlParameters.ucTargetTemperature)
result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetFanTemperatureTarget,
hwmgr->thermal_controller.
advanceFanControlParameters.ucTargetTemperature);
return result;
}
int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
}
/**
* Set Fan Speed in percent.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed is the percentage value (0% - 100%) to be set.
* @exception Fails if the 100% setting appears to be 0.
*/
int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t speed)
{
uint32_t duty100;
uint32_t duty;
uint64_t tmp64;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
if (speed > 100)
speed = 100;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100);
if (duty100 == 0)
return -EINVAL;
tmp64 = (uint64_t)speed * duty100;
do_div(tmp64, 100);
duty = (uint32_t)tmp64;
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
/**
* Reset Fan Speed to default.
* @param hwmgr the address of the powerplay hardware manager.
* @exception Always succeeds.
*/
int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
{
int result;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
result = polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
if (!result)
result = polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
} else
result = polaris10_fan_ctrl_set_default_mode(hwmgr);
return result;
}
/**
* Set Fan Speed in RPM.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed is the RPM value (min - max) to be set.
* @exception Fails if the speed does not lie between min and max.
*/
int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
{
uint32_t tach_period;
uint32_t crystal_clock_freq;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0) ||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
crystal_clock_freq = tonga_get_xclk(hwmgr);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_STATUS, TACH_PERIOD, tach_period);
return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
/**
* Reads the current temperature from the Polaris thermal controller.
*
* @param hwmgr The address of the hardware manager.
*/
int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
int temp;
temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_MULT_THERMAL_STATUS, CTF_TEMP);
/* Bit 9 means the reading is lower than the lowest usable value. */
if (temp & 0x200)
temp = POLARIS10_THERMAL_MAXIMUM_TEMP_READING;
else
temp = temp & 0x1ff;
temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return temp;
}
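A decoding sketch for the CTF_TEMP field as handled above: bit 9 (0x200) flags an out-of-range reading, otherwise the low nine bits carry degrees, which are then scaled to PowerPlay units. With an illustrative raw value:
int raw = 0x05a;	/* illustrative CTF_TEMP field value */
int temp = (raw & 0x200) ? POLARIS10_THERMAL_MAXIMUM_TEMP_READING
			 : (raw & 0x1ff);	/* 0x5a == 90 degrees */
temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;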
/**
* Set the requested temperature range for high and low alert signals
*
* @param hwmgr The address of the hardware manager.
* @param range Temperature range to be programmed for high and low alert signals
* @exception PP_Result_BadInput if the input data is not valid.
*/
static int polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
uint32_t low_temp, uint32_t high_temp)
{
uint32_t low = POLARIS10_THERMAL_MINIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
uint32_t high = POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
if (low < low_temp)
low = low_temp;
if (high > high_temp)
high = high_temp;
if (low > high)
return -EINVAL;
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, DIG_THERM_INTH,
(high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, DIG_THERM_INTL,
(low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_CTRL, DIG_THERM_DPM,
(high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
return 0;
}
/**
* Programs thermal controller one-time setting registers
*
* @param hwmgr The address of the hardware manager.
*/
static int polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
{
if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_CTRL, EDGE_PER_REV,
hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution - 1);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
return 0;
}
/**
* Enable thermal alerts on the Polaris thermal controller.
*
* @param hwmgr The address of the hardware manager.
*/
static int polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK);
alert &= ~(POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to enable internal thermal interrupts */
return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
}
/**
* Disable thermal alerts on the Polaris thermal controller.
* @param hwmgr The address of the hardware manager.
*/
static int polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK);
alert |= (POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to disable internal thermal interrupts */
return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
}
/**
* Uninitialize the thermal controller.
* Currently just disables alerts.
* @param hwmgr The address of the hardware manager.
*/
int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
int result = polaris10_thermal_disable_alert(hwmgr);
if (!hwmgr->thermal_controller.fanInfo.bNoFan)
polaris10_fan_ctrl_set_default_mode(hwmgr);
return result;
}
/**
* Set up the fan table to control the fan using the SMC.
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from the fan table setup routine
*/
int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
uint32_t duty100;
uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
uint16_t fdo_min, slope1, slope2;
uint32_t reference_clock;
int res;
uint64_t tmp64;
if (data->fan_table_start == 0) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100);
if (duty100 == 0) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
usPWMMin * duty100;
do_div(tmp64, 10000);
fdo_min = (uint16_t)tmp64;
t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
fan_table.TempMin = cpu_to_be16((50 + hwmgr->
thermal_controller.advanceFanControlParameters.usTMin) / 100);
fan_table.TempMed = cpu_to_be16((50 + hwmgr->
thermal_controller.advanceFanControlParameters.usTMed) / 100);
fan_table.TempMax = cpu_to_be16((50 + hwmgr->
thermal_controller.advanceFanControlParameters.usTMax) / 100);
fan_table.Slope1 = cpu_to_be16(slope1);
fan_table.Slope2 = cpu_to_be16(slope2);
fan_table.FdoMin = cpu_to_be16(fdo_min);
fan_table.HystDown = cpu_to_be16(hwmgr->
thermal_controller.advanceFanControlParameters.ucTHyst);
fan_table.HystUp = cpu_to_be16(1);
fan_table.HystSlope = cpu_to_be16(1);
fan_table.TempRespLim = cpu_to_be16(5);
reference_clock = tonga_get_xclk(hwmgr);
fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
thermal_controller.advanceFanControlParameters.ulCycleDelay *
reference_clock) / 1600);
fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
hwmgr->device, CGS_IND_REG__SMC,
CG_MULT_THERMAL_CTRL, TEMP_SEL);
res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
data->sram_end);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ucMinimumPWMLimit)
res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetFanMinPwm,
hwmgr->thermal_controller.
advanceFanControlParameters.ucMinimumPWMLimit);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetFanSclkTarget,
hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
if (res)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
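Two fixed-point details in the table setup above: usPWMMin appears to be stored in hundredths of a percent (hence the division by 10000 against duty100), and the "50 +" before each "/ 100" rounds centidegrees to the nearest degree. With illustrative parameters:
uint16_t fdo_min = (uint16_t)(3000ULL * 255 / 10000);	/* usPWMMin = 30.00% -> 76 */
uint16_t temp_min = (50 + 4550) / 100;			/* usTMin = 45.50 C -> 46 */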
/**
* Start the fan control on the SMC.
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from the SMC fan control start routine
*/
int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
/* If the fantable setup has failed we could have disabled
* PHM_PlatformCaps_MicrocodeFanControl even after
* this function was included in the table.
* Make sure that we still think controlling the fan is OK.
*/
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
return 0;
}
/**
* Set temperature range for high and low alerts
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from set temperature range routine
*/
int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
if (range == NULL)
return -EINVAL;
return polaris10_thermal_set_temperature_range(hwmgr, range->min, range->max);
}
/**
* Programs one-time setting registers
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from initialize thermal controller routine
*/
int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
return polaris10_thermal_initialize(hwmgr);
}
/**
* Enable high and low alerts
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from enable alert routine
*/
int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
return polaris10_thermal_enable_alert(hwmgr);
}
/**
* Disable high and low alerts
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from disable alert routine
*/
static int tf_polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
return polaris10_thermal_disable_alert(hwmgr);
}
static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
int ret;
struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
return 0;
ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
0 : -1;
if (!ret)
/* If this param is not changed, this function could fire unnecessarily */
smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
return ret;
}
static const struct phm_master_table_item
polaris10_thermal_start_thermal_controller_master_list[] = {
{NULL, tf_polaris10_thermal_initialize},
{NULL, tf_polaris10_thermal_set_temperature_range},
{NULL, tf_polaris10_thermal_enable_alert},
{NULL, tf_polaris10_thermal_avfs_enable},
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this
* so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
{NULL, tf_polaris10_thermal_setup_fan_table},
{NULL, tf_polaris10_thermal_start_smc_fan_control},
{NULL, NULL}
};
static const struct phm_master_table_header
polaris10_thermal_start_thermal_controller_master = {
0,
PHM_MasterTableFlag_None,
polaris10_thermal_start_thermal_controller_master_list
};
static const struct phm_master_table_item
polaris10_thermal_set_temperature_range_master_list[] = {
{NULL, tf_polaris10_thermal_disable_alert},
{NULL, tf_polaris10_thermal_set_temperature_range},
{NULL, tf_polaris10_thermal_enable_alert},
{NULL, NULL}
};
static const struct phm_master_table_header
polaris10_thermal_set_temperature_range_master = {
0,
PHM_MasterTableFlag_None,
polaris10_thermal_set_temperature_range_master_list
};
int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->thermal_controller.fanInfo.bNoFan)
polaris10_fan_ctrl_set_default_mode(hwmgr);
return 0;
}
/**
* Initializes the thermal controller related functions in the Hardware Manager structure.
* @param hwmgr The address of the hardware manager.
* @exception Any error code from the low-level communication.
*/
int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
{
int result;
result = phm_construct_table(hwmgr,
&polaris10_thermal_set_temperature_range_master,
&(hwmgr->set_temperature_range));
if (!result) {
result = phm_construct_table(hwmgr,
&polaris10_thermal_start_thermal_controller_master,
&(hwmgr->start_thermal_controller));
if (result)
phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
}
if (!result)
hwmgr->fan_ctrl_is_in_default_mode = true;
return result;
}
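phm_construct_table() turns the master lists above into runtime tables; conceptually, running one just walks the array and calls each entry until something fails. A minimal sketch of that idea (not the actual phm runner; the tableFunction member name and call convention are assumed from the {check, function} initializers above):
/* Sketch only: field name and call convention assumed, not taken from phm. */
static int run_table_sketch(struct pp_hwmgr *hwmgr,
		const struct phm_master_table_item *item)
{
	int ret = 0;

	for (; item->tableFunction; item++) {
		ret = item->tableFunction(hwmgr, NULL, NULL, NULL, ret);
		if (ret)
			break;
	}
	return ret;
}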

View File

@ -0,0 +1,62 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _POLARIS10_THERMAL_H_
#define _POLARIS10_THERMAL_H_
#include "hwmgr.h"
#define POLARIS10_THERMAL_HIGH_ALERT_MASK 0x1
#define POLARIS10_THERMAL_LOW_ALERT_MASK 0x2
#define POLARIS10_THERMAL_MINIMUM_TEMP_READING -256
#define POLARIS10_THERMAL_MAXIMUM_TEMP_READING 255
#define POLARIS10_THERMAL_MINIMUM_ALERT_TEMP 0
#define POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP 255
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5
extern int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
extern int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
#endif

View File

@ -373,6 +373,37 @@ int atomctrl_get_engine_pll_dividers_vi(
return result;
}
int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
uint32_t clock_value,
pp_atomctrl_clock_dividers_ai *dividers)
{
COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_parameters;
int result;
pll_parameters.ulClock.ulClock = clock_value;
pll_parameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
result = cgs_atom_exec_cmd_table
(hwmgr->device,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
&pll_parameters);
if (0 == result) {
dividers->usSclk_fcw_frac = le16_to_cpu(pll_parameters.usSclk_fcw_frac);
dividers->usSclk_fcw_int = le16_to_cpu(pll_parameters.usSclk_fcw_int);
dividers->ucSclkPostDiv = pll_parameters.ucSclkPostDiv;
dividers->ucSclkVcoMode = pll_parameters.ucSclkVcoMode;
dividers->ucSclkPllRange = pll_parameters.ucSclkPllRange;
dividers->ucSscEnable = pll_parameters.ucSscEnable;
dividers->usSsc_fcw1_frac = le16_to_cpu(pll_parameters.usSsc_fcw1_frac);
dividers->usSsc_fcw1_int = le16_to_cpu(pll_parameters.usSsc_fcw1_int);
dividers->usPcc_fcw_int = le16_to_cpu(pll_parameters.usPcc_fcw_int);
dividers->usSsc_fcw_slew_frac = le16_to_cpu(pll_parameters.usSsc_fcw_slew_frac);
dividers->usPcc_fcw_slew_frac = le16_to_cpu(pll_parameters.usPcc_fcw_slew_frac);
}
return result;
}
int atomctrl_get_dfs_pll_dividers_vi(
struct pp_hwmgr *hwmgr,
uint32_t clock_value,
@ -618,7 +649,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (!getASICProfilingInfo)
return -1;
if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
(getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
return -1;
@ -891,18 +922,18 @@ int atomctrl_calculate_voltage_evv_on_sclk(
*-----------------------
*/
fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
fC_Term = fAdd(fMargin_RO_c,
fAdd(fMultiply(fSM_A0,fLkg_FT),
fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
fAdd(fMultiply(fSM_A3, fSclk),
fSubtract(fSM_A7, fRO_fused)))));
fVDDC_base = fSubtract(fRO_fused,
fSubtract(fMargin_RO_c,
fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk))));
fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2));
repeat = fSubtract(fVDDC_base,
fDivide(fMargin_DC_sigma, ConvertToFraction(1000)));
@ -916,7 +947,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
fSubtract(fRO_DC_margin,
fSubtract(fSM_A3,
fMultiply(fSM_A2, repeat))));
fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1));
fSigma_DC = fSubtract(fSclk, fDC_SCLK);
@ -996,7 +1027,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0);
if (GreaterThan(fV_max, fV_NL) &&
(GreaterThan(fV_NL, fEVV_V) ||
Equal(fV_NL, fEVV_V))) {
fV_NL = fMultiply(fV_NL, ConvertToFraction(1000));
@ -1205,3 +1236,69 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
return result;
}
int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
uint8_t level)
{
DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
int result;
memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = memory_clock & SET_CLOCK_FREQ_MASK;
memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM;
memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
result = cgs_atom_exec_cmd_table
(hwmgr->device,
GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
&memory_clock_parameters);
return result;
}
int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage)
{
int result;
GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space;
get_voltage_info_param_space.ucVoltageType = voltage_type;
get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
get_voltage_info_param_space.usVoltageLevel = virtual_voltage_Id;
get_voltage_info_param_space.ulSCLKFreq = sclk;
result = cgs_atom_exec_cmd_table(hwmgr->device,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
&get_voltage_info_param_space);
if (0 != result)
return result;
*voltage = get_voltage_info_param_space.usVoltageLevel;
return result;
}
int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table)
{
int i;
u8 frev, crev;
u16 size;
ATOM_SMU_INFO_V2_1 *psmu_info =
(ATOM_SMU_INFO_V2_1 *)cgs_atom_get_data_table(hwmgr->device,
GetIndexIntoMasterTable(DATA, SMU_Info),
&size, &frev, &crev);
for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
table->entry[i].usFcw_pcc = psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc;
table->entry[i].usFcw_trans_upper = psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper;
table->entry[i].usRcw_trans_lower = psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower;
}
return 0;
}
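One defensive note on the function above: cgs_atom_get_data_table() can plausibly return NULL when the SMU_Info data table is absent, and the loop would then dereference it. A guard (sketch) placed before the loop:
if (!psmu_info)		/* data table lookup failed */
	return -EINVAL;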

Some files were not shown because too many files have changed in this diff.