Merge branch 'drm-next-4.2-amdgpu' of git://people.freedesktop.org/~agd5f/linux into drm-next
More fixes for amdgpu for 4.2. We've integrated Jerome's comments about the
interface among other things. I'll be on vacation next week so Christian will
be handling any updates next week.

* 'drm-next-4.2-amdgpu' of git://people.freedesktop.org/~agd5f/linux: (23 commits)
  drm/amdgpu: fix a amdgpu_dpm=0 bug
  drm/amdgpu: don't enable/disable display twice on suspend/resume
  drm/amdgpu: fix UVD/VCE VM emulation
  drm/amdgpu: enable vce powergating
  drm/amdgpu/iceland: don't call smu_init on resume
  drm/amdgpu/tonga: don't call smu_init on resume
  drm/amdgpu/cz: don't call smu_init on resume
  drm/amdgpu: update to latest gfx8 golden register settings
  drm/amdgpu: whitespace cleanup in gmc8 golden regs
  drm/admgpu: move XDMA golden registers to dce code
  drm/amdgpu: fix the build on big endian
  drm/amdgpu: cleanup UAPI comments
  drm/amdgpu: remove AMDGPU_CTX_OP_STATE_RUNNING
  drm/amdgpu: remove the VI hardware semaphore in ring sync
  drm/amdgpu: set the gfx config properly for all CZ variants (v2)
  drm/amdgpu: also print the pci revision when printing the pci ids
  drm/amdgpu: cleanup VA IOCTL
  drm/amdgpu: fix saddr handling in amdgpu_vm_bo_unmap
  drm/amdgpu: fix amdgpu_vm_bo_map
  drm/amdgpu: remove unused AMDGPU_IB_FLAG_GDS
  ...
commit dc9be8e217
@@ -317,7 +317,7 @@ struct amdgpu_ring_funcs {
 	void (*emit_ib)(struct amdgpu_ring *ring,
 			struct amdgpu_ib *ib);
 	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
-			   uint64_t seq, bool write64bit);
+			   uint64_t seq, unsigned flags);
 	bool (*emit_semaphore)(struct amdgpu_ring *ring,
 			       struct amdgpu_semaphore *semaphore,
 			       bool emit_wait);
@@ -392,6 +392,9 @@ struct amdgpu_fence_driver {
 #define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
 #define AMDGPU_FENCE_OWNER_MOVE		((void*)2ul)
 
+#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
+#define AMDGPU_FENCE_FLAG_INT		(1 << 1)
+
 struct amdgpu_fence {
 	struct fence base;
 
@@ -1506,6 +1509,7 @@ struct amdgpu_dpm_funcs {
 	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
 	bool (*vblank_too_short)(struct amdgpu_device *adev);
 	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
+	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
 	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
 	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
 	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
@@ -2142,7 +2146,7 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
 #define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
-#define amdgpu_ring_emit_fence(r, addr, seq, write64bit) (r)->funcs->emit_fence((r), (addr), (seq), (write64bit))
+#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
 #define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait))
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
@@ -2179,6 +2183,7 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 #define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l))
 #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
 #define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g))
+#define amdgpu_dpm_powergate_vce(adev, g) (adev)->pm.funcs->powergate_vce((adev), (g))
 #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
 #define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m))
 #define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev))
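Annotation: the interface change mentioned in the merge message is the one visible above. emit_fence() now takes an unsigned flags bitmask instead of a single write64bit bool, so the 64-bit data write and the completion interrupt can be requested independently. A stand-alone C sketch of the decode each ring backend now performs (flag values copied from the amdgpu.h hunk above; everything else is illustrative):

	/* A minimal user-space sketch, not kernel code. */
	#include <stdbool.h>
	#include <stdio.h>

	#define AMDGPU_FENCE_FLAG_64BIT (1u << 0)
	#define AMDGPU_FENCE_FLAG_INT   (1u << 1)

	static void describe_fence(unsigned flags)
	{
		bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; /* write seq as 64 bits */
		bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;      /* raise an interrupt */

		printf("64-bit write: %d, interrupt: %d\n", write64bit, int_sel);
	}

	int main(void)
	{
		describe_fence(AMDGPU_FENCE_FLAG_INT);   /* driver fence path */
		describe_fence(AMDGPU_FENCE_FLAG_64BIT); /* user fence path */
		return 0;
	}
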
@@ -564,21 +564,33 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			return r;
 
 		if (ring->funcs->parse_cs) {
+			struct amdgpu_bo_va_mapping *m;
 			struct amdgpu_bo *aobj = NULL;
-			void *kptr;
+			uint64_t offset;
+			uint8_t *kptr;
 
-			amdgpu_cs_find_mapping(parser, chunk_ib->va_start, &aobj);
+			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
+						   &aobj);
 			if (!aobj) {
 				DRM_ERROR("IB va_start is invalid\n");
 				return -EINVAL;
 			}
 
+			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
+			    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+				return -EINVAL;
+			}
+
 			/* the IB should be reserved at this point */
-			r = amdgpu_bo_kmap(aobj, &kptr);
+			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
 			if (r) {
 				return r;
 			}
 
+			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
+			kptr += chunk_ib->va_start - offset;
+
 			r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib);
 			if (r) {
 				DRM_ERROR("Failed to get ib !\n");
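Annotation: the added bounds check rejects an IB whose [va_start, va_start + ib_bytes) range runs past the found mapping; m->it.last is the index of the mapping's last GPU page, hence the (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE limit, and kptr is then advanced by va_start minus the mapping's base offset. A stand-alone sketch of the same arithmetic with made-up numbers:

	#include <stdint.h>
	#include <stdio.h>

	#define AMDGPU_GPU_PAGE_SIZE 4096

	int main(void)
	{
		uint64_t it_start = 16, it_last = 19; /* mapping covers GPU pages 16..19 */
		uint64_t va_start = 16 * AMDGPU_GPU_PAGE_SIZE + 512;
		uint32_t ib_bytes = 3 * AMDGPU_GPU_PAGE_SIZE;

		if ((va_start + ib_bytes) > (it_last + 1) * AMDGPU_GPU_PAGE_SIZE) {
			printf("IB va_start+ib_bytes is invalid\n");
			return 1;
		}

		/* kptr starts at the CPU mapping of the BO; offset it to va_start */
		uint64_t offset = it_start * AMDGPU_GPU_PAGE_SIZE;
		printf("IB begins %llu bytes into the mapping\n",
		       (unsigned long long)(va_start - offset));
		return 0;
	}
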
@@ -1388,9 +1388,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
 
-	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
-		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
-		 pdev->subsystem_vendor, pdev->subsystem_device);
+	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
+		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
 
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
@@ -128,7 +128,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 	fence_init(&(*fence)->base, &amdgpu_fence_ops,
 		   &adev->fence_queue.lock, adev->fence_context + ring->idx,
 		   (*fence)->seq);
-	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, (*fence)->seq, false);
+	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+			       (*fence)->seq,
+			       AMDGPU_FENCE_FLAG_INT);
 	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
 	return 0;
 }
@@ -522,6 +524,10 @@ long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq,
 	bool signaled;
 	int i, r;
 
+	if (timeout == 0) {
+		return amdgpu_fence_any_seq_signaled(adev, target_seq);
+	}
+
 	while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {
 
 		/* Save current sequence values, used to check for GPU lockups */
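Annotation: the timeout == 0 early return turns the wait helper into a non-blocking poll, reporting whether any target sequence number has already signaled. A toy single-threaded sketch of that split (the stand-in flag replaces amdgpu_fence_any_seq_signaled(); the real code sleeps on the fence queue instead of spinning):

	#include <stdbool.h>
	#include <stdio.h>

	static bool any_seq_signaled; /* stand-in for amdgpu_fence_any_seq_signaled() */

	static long wait_seq_timeout(long timeout)
	{
		if (timeout == 0)            /* poll: report state, never block */
			return any_seq_signaled;
		while (!any_seq_signaled)
			;                    /* real code waits on the fence queue */
		return timeout;
	}

	int main(void)
	{
		any_seq_signaled = true;
		printf("poll result: %ld\n", wait_seq_timeout(0));
		return 0;
	}
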
@@ -37,6 +37,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 	if (robj) {
 		if (robj->gem_base.import_attach)
 			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+		amdgpu_mn_unregister(robj);
 		amdgpu_bo_unref(&robj);
 	}
 }
@@ -504,7 +505,7 @@ error_free:
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp)
 {
-	union drm_amdgpu_gem_va *args = data;
+	struct drm_amdgpu_gem_va *args = data;
 	struct drm_gem_object *gobj;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
@@ -513,95 +514,73 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	uint32_t invalid_flags, va_flags = 0;
 	int r = 0;
 
-	if (!adev->vm_manager.enabled) {
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
+	if (!adev->vm_manager.enabled)
 		return -ENOTTY;
-	}
 
-	if (args->in.va_address < AMDGPU_VA_RESERVED_SIZE) {
+	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
 		dev_err(&dev->pdev->dev,
 			"va_address 0x%lX is in reserved area 0x%X\n",
-			(unsigned long)args->in.va_address,
+			(unsigned long)args->va_address,
 			AMDGPU_VA_RESERVED_SIZE);
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
 		return -EINVAL;
 	}
 
 	invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
 			AMDGPU_VM_PAGE_EXECUTABLE);
-	if ((args->in.flags & invalid_flags)) {
+	if ((args->flags & invalid_flags)) {
 		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
-			args->in.flags, invalid_flags);
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
+			args->flags, invalid_flags);
 		return -EINVAL;
 	}
 
-	switch (args->in.operation) {
+	switch (args->operation) {
 	case AMDGPU_VA_OP_MAP:
 	case AMDGPU_VA_OP_UNMAP:
 		break;
 	default:
 		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
-			args->in.operation);
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
+			args->operation);
 		return -EINVAL;
 	}
 
-	gobj = drm_gem_object_lookup(dev, filp, args->in.handle);
-	if (gobj == NULL) {
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL)
 		return -ENOENT;
-	}
+
 	rbo = gem_to_amdgpu_bo(gobj);
 	r = amdgpu_bo_reserve(rbo, false);
 	if (r) {
-		if (r != -ERESTARTSYS) {
-			memset(args, 0, sizeof(*args));
-			args->out.result = AMDGPU_VA_RESULT_ERROR;
-		}
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
 
 	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
 	if (!bo_va) {
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
 		drm_gem_object_unreference_unlocked(gobj);
 		amdgpu_bo_unreserve(rbo);
 		return -ENOENT;
 	}
 
-	switch (args->in.operation) {
+	switch (args->operation) {
 	case AMDGPU_VA_OP_MAP:
-		if (args->in.flags & AMDGPU_VM_PAGE_READABLE)
+		if (args->flags & AMDGPU_VM_PAGE_READABLE)
 			va_flags |= AMDGPU_PTE_READABLE;
-		if (args->in.flags & AMDGPU_VM_PAGE_WRITEABLE)
+		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
 			va_flags |= AMDGPU_PTE_WRITEABLE;
-		if (args->in.flags & AMDGPU_VM_PAGE_EXECUTABLE)
+		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
 			va_flags |= AMDGPU_PTE_EXECUTABLE;
-		r = amdgpu_vm_bo_map(adev, bo_va, args->in.va_address,
-				     args->in.offset_in_bo, args->in.map_size,
+		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
+				     args->offset_in_bo, args->map_size,
 				     va_flags);
 		break;
 	case AMDGPU_VA_OP_UNMAP:
-		r = amdgpu_vm_bo_unmap(adev, bo_va, args->in.va_address);
+		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
 		break;
 	default:
 		break;
 	}
 
-	if (!r) {
+	if (!r)
 		amdgpu_gem_va_update_vm(adev, bo_va);
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_OK;
-	} else {
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
-	}
 
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
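Annotation: after the cleanup the VA ioctl no longer reports AMDGPU_VA_RESULT_* through an output member; the struct is input-only and failures come back as ordinary errno values. A hedged user-space sketch (struct layout copied from the uapi hunk near the end of this diff; the request number would be DRM_IOCTL_AMDGPU_GEM_VA from the real header, passed in here as a parameter, and map_bo is a made-up helper name):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>

	struct drm_amdgpu_gem_va {
		uint32_t handle;      /* GEM object handle */
		uint32_t _pad;
		uint32_t operation;   /* AMDGPU_VA_OP_* */
		uint32_t flags;       /* AMDGPU_VM_PAGE_* */
		uint64_t va_address;
		uint64_t offset_in_bo;
		uint64_t map_size;
	};

	#define AMDGPU_VA_OP_MAP         1
	#define AMDGPU_VM_PAGE_READABLE  (1 << 1)
	#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)

	static int map_bo(int fd, unsigned long req, uint32_t handle,
			  uint64_t va, uint64_t size)
	{
		struct drm_amdgpu_gem_va args;

		memset(&args, 0, sizeof(args));
		args.handle = handle;
		args.operation = AMDGPU_VA_OP_MAP;
		args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
		args.va_address = va;
		args.map_size = size;

		return ioctl(fd, req, &args); /* 0 on success, -1 + errno on failure */
	}
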
@@ -216,7 +216,8 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 	if (ib->user) {
 		uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
 		addr += ib->user->offset;
-		amdgpu_ring_emit_fence(ring, addr, ib->fence->seq, true);
+		amdgpu_ring_emit_fence(ring, addr, ib->fence->seq,
+				       AMDGPU_FENCE_FLAG_64BIT);
 	}
 
 	if (ib->vm)
@@ -95,7 +95,6 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	bo = container_of(tbo, struct amdgpu_bo, tbo);
 
 	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
-	amdgpu_mn_unregister(bo);
 
 	mutex_lock(&bo->adev->gem.mutex);
 	list_del_init(&bo->list);
@@ -656,19 +656,27 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
-	if (enable) {
+	if (adev->pm.funcs->powergate_vce) {
 		mutex_lock(&adev->pm.mutex);
-		adev->pm.dpm.vce_active = true;
-		/* XXX select vce level based on ring/task */
-		adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
+		/* enable/disable VCE */
+		amdgpu_dpm_powergate_vce(adev, !enable);
+
 		mutex_unlock(&adev->pm.mutex);
 	} else {
-		mutex_lock(&adev->pm.mutex);
-		adev->pm.dpm.vce_active = false;
-		mutex_unlock(&adev->pm.mutex);
-	}
+		if (enable) {
+			mutex_lock(&adev->pm.mutex);
+			adev->pm.dpm.vce_active = true;
+			/* XXX select vce level based on ring/task */
+			adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
+			mutex_unlock(&adev->pm.mutex);
+		} else {
+			mutex_lock(&adev->pm.mutex);
+			adev->pm.dpm.vce_active = false;
+			mutex_unlock(&adev->pm.mutex);
+		}
 
-	amdgpu_pm_compute_clocks(adev);
+		amdgpu_pm_compute_clocks(adev);
+	}
 }
 
 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
@@ -637,9 +637,9 @@ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
  *
  */
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-				bool write64bits)
+				unsigned flags)
 {
-	WARN_ON(write64bits);
+	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 
 	amdgpu_ring_write(ring, VCE_CMD_FENCE);
 	amdgpu_ring_write(ring, addr);
@@ -40,7 +40,7 @@ bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
 				    bool emit_wait);
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-				bool write64bit);
+				unsigned flags);
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring);
 
@@ -1002,6 +1002,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	list_add(&mapping->list, &bo_va->mappings);
 	interval_tree_insert(&mapping->it, &vm->va);
 
+	bo_va->addr = 0;
+
 	/* Make sure the page tables are allocated */
 	saddr >>= amdgpu_vm_block_size;
 	eaddr >>= amdgpu_vm_block_size;
@@ -1082,6 +1084,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_vm *vm = bo_va->vm;
 
+	saddr /= AMDGPU_GPU_PAGE_SIZE;
+
 	list_for_each_entry(mapping, &bo_va->mappings, list) {
 		if (mapping->it.start == saddr)
 			break;
@@ -259,8 +259,9 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
  * an interrupt if needed (CIK).
  */
 static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-				     bool write64bit)
+				     unsigned flags)
 {
+	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 	/* write the fence */
 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
 	amdgpu_ring_write(ring, lower_32_bits(addr));
@@ -410,7 +411,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 		rb_bufsz = order_base_2(ring->ring_size / 4);
 		rb_cntl = rb_bufsz << 1;
 #ifdef __BIG_ENDIAN
-		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
+		rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
+			SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
 #endif
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 
@@ -43,6 +43,7 @@
 #include "gfx_v8_0.h"
 
 static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
+static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
 
 static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
 {
@@ -556,8 +557,11 @@ static int cz_dpm_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	/* powerdown unused blocks for now */
-	cz_dpm_powergate_uvd(adev, true);
+	if (amdgpu_dpm) {
+		/* powerdown unused blocks for now */
+		cz_dpm_powergate_uvd(adev, true);
+		cz_dpm_powergate_vce(adev, true);
+	}
 
 	return 0;
 }
@@ -826,16 +830,16 @@ static void cz_init_vce_limit(struct amdgpu_device *adev)
 		return;
 	}
 
-	pi->vce_dpm.soft_min_clk = 0;
-	pi->vce_dpm.hard_min_clk = 0;
+	pi->vce_dpm.soft_min_clk = table->entries[0].ecclk;
+	pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
 	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
 	level = cz_get_argument(adev);
 	if (level < table->count)
-		clock = table->entries[level].evclk;
+		clock = table->entries[level].ecclk;
 	else {
 		/* future BIOS would fix this error */
 		DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
-		clock = table->entries[table->count - 1].evclk;
+		clock = table->entries[table->count - 1].ecclk;
 	}
 
 	pi->vce_dpm.soft_max_clk = clock;
@@ -1004,6 +1008,36 @@ static uint32_t cz_get_sclk_level(struct amdgpu_device *adev,
 	return i;
 }
 
+static uint32_t cz_get_eclk_level(struct amdgpu_device *adev,
+				  uint32_t clock, uint16_t msg)
+{
+	int i = 0;
+	struct amdgpu_vce_clock_voltage_dependency_table *table =
+		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+	if (table->count == 0)
+		return 0;
+
+	switch (msg) {
+	case PPSMC_MSG_SetEclkSoftMin:
+	case PPSMC_MSG_SetEclkHardMin:
+		for (i = 0; i < table->count-1; i++)
+			if (clock <= table->entries[i].ecclk)
+				break;
+		break;
+	case PPSMC_MSG_SetEclkSoftMax:
+	case PPSMC_MSG_SetEclkHardMax:
+		for (i = table->count - 1; i > 0; i--)
+			if (clock >= table->entries[i].ecclk)
+				break;
+		break;
+	default:
+		break;
+	}
+
+	return i;
+}
+
 static int cz_program_bootup_state(struct amdgpu_device *adev)
 {
 	struct cz_power_info *pi = cz_get_pi(adev);
@@ -1230,7 +1264,10 @@ static int cz_dpm_hw_init(void *handle)
 
 	mutex_lock(&adev->pm.mutex);
 
-	/* init smc in dpm hw init */
+	/* smu init only needs to be called at startup, not resume.
+	 * It should be in sw_init, but requires the fw info gathered
+	 * in sw_init from other IP modules.
+	 */
 	ret = cz_smu_init(adev);
 	if (ret) {
 		DRM_ERROR("amdgpu: smc initialization failed\n");
@@ -1282,6 +1319,7 @@ static int cz_dpm_disable(struct amdgpu_device *adev)
 
 	/* powerup blocks */
 	cz_dpm_powergate_uvd(adev, false);
+	cz_dpm_powergate_vce(adev, false);
 
 	cz_clear_voting_clients(adev);
 	cz_stop_dpm(adev);
@@ -1297,6 +1335,10 @@ static int cz_dpm_hw_fini(void *handle)
 
 	mutex_lock(&adev->pm.mutex);
 
+	/* smu fini only needs to be called at teardown, not suspend.
+	 * It should be in sw_fini, but we put it here for symmetry
+	 * with smu init.
+	 */
 	cz_smu_fini(adev);
 
 	if (adev->pm.dpm_enabled) {
@@ -1340,12 +1382,6 @@ static int cz_dpm_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	mutex_lock(&adev->pm.mutex);
-	ret = cz_smu_init(adev);
-	if (ret) {
-		DRM_ERROR("amdgpu: smc resume failed\n");
-		mutex_unlock(&adev->pm.mutex);
-		return ret;
-	}
 
 	/* do the actual fw loading */
 	ret = cz_smu_start(adev);
@@ -1774,6 +1810,96 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
 	}
 }
 
+static int cz_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	int ret = 0;
+
+	if (enable && pi->caps_vce_dpm) {
+		pi->dpm_flags |= DPMFlags_VCE_Enabled;
+		DRM_DEBUG("VCE DPM Enabled.\n");
+
+		ret = cz_send_msg_to_smc_with_parameter(adev,
+			PPSMC_MSG_EnableAllSmuFeatures, VCE_DPM_MASK);
+
+	} else {
+		pi->dpm_flags &= ~DPMFlags_VCE_Enabled;
+		DRM_DEBUG("VCE DPM Stopped\n");
+
+		ret = cz_send_msg_to_smc_with_parameter(adev,
+			PPSMC_MSG_DisableAllSmuFeatures, VCE_DPM_MASK);
+	}
+
+	return ret;
+}
+
+static int cz_update_vce_dpm(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct amdgpu_vce_clock_voltage_dependency_table *table =
+		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+	/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
+	if (pi->caps_stable_power_state) {
+		pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk;
+
+	} else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */
+		pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
+	}
+
+	cz_send_msg_to_smc_with_parameter(adev,
+		PPSMC_MSG_SetEclkHardMin,
+		cz_get_eclk_level(adev,
+			pi->vce_dpm.hard_min_clk,
+			PPSMC_MSG_SetEclkHardMin));
+	return 0;
+}
+
+static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+
+	if (pi->caps_vce_pg) {
+		if (pi->vce_power_gated != gate) {
+			if (gate) {
+				/* disable clockgating so we can properly shut down the block */
+				amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							     AMD_CG_STATE_UNGATE);
+				/* shutdown the VCE block */
+				amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							     AMD_PG_STATE_GATE);
+
+				cz_enable_vce_dpm(adev, false);
+				/* TODO: to figure out why vce can't be poweroff. */
+				/* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */
+				pi->vce_power_gated = true;
+			} else {
+				cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON);
+				pi->vce_power_gated = false;
+
+				/* re-init the VCE block */
+				amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							     AMD_PG_STATE_UNGATE);
+				/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
+				amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							     AMD_CG_STATE_GATE);
+
+				cz_update_vce_dpm(adev);
+				cz_enable_vce_dpm(adev, true);
+			}
+		} else {
+			if (! pi->vce_power_gated) {
+				cz_update_vce_dpm(adev);
+			}
+		}
+	} else { /*pi->caps_vce_pg*/
+		cz_update_vce_dpm(adev);
+		cz_enable_vce_dpm(adev, true);
+	}
+
+	return;
+}
+
 const struct amd_ip_funcs cz_dpm_ip_funcs = {
 	.early_init = cz_dpm_early_init,
 	.late_init = cz_dpm_late_init,
@@ -1805,6 +1931,7 @@ static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
 	.force_performance_level = cz_dpm_force_dpm_level,
 	.vblank_too_short = NULL,
 	.powergate_uvd = cz_dpm_powergate_uvd,
+	.powergate_vce = cz_dpm_powergate_vce,
 };
 
 static void cz_dpm_set_funcs(struct amdgpu_device *adev)
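Annotation: cz_get_eclk_level() walks the VCE clock/voltage dependency table bottom-up for the *EclkSoftMin/*EclkHardMin messages and top-down for the *Max ones, returning the first level whose ecclk satisfies the requested clock. A stand-alone sketch of the min-style walk with a made-up table:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t ecclk[] = { 133330, 200000, 266670, 400000, 533330 };
		int count = 5, i;
		uint32_t clock = 300000;

		/* "hard min" lookup: lowest level whose ecclk covers the clock */
		for (i = 0; i < count - 1; i++)
			if (clock <= ecclk[i])
				break;

		printf("hard min %u -> level %d (%u)\n", clock, i, ecclk[i]);
		return 0;
	}
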
@@ -120,10 +120,19 @@ static const u32 golden_settings_tonga_a11[] =
 	mmHDMI_CONTROL, 0x31000111, 0x00000011,
 };
 
+static const u32 tonga_mgcg_cgcg_init[] =
+{
+	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+};
+
 static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
+						 tonga_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
 						 golden_settings_tonga_a11,
 						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
@@ -3008,16 +3017,8 @@ static int dce_v10_0_hw_fini(void *handle)
 
 static int dce_v10_0_suspend(void *handle)
 {
-	struct drm_connector *connector;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	drm_kms_helper_poll_disable(adev->ddev);
-
-	/* turn off display hw */
-	list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
-		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-	}
-
 	amdgpu_atombios_scratch_regs_save(adev);
 
 	dce_v10_0_hpd_fini(adev);
@@ -3027,7 +3028,6 @@ static int dce_v10_0_suspend(void *handle)
 
 static int dce_v10_0_resume(void *handle)
 {
-	struct drm_connector *connector;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	dce_v10_0_init_golden_registers(adev);
@@ -3048,15 +3048,6 @@ static int dce_v10_0_resume(void *handle)
 	/* initialize hpd */
 	dce_v10_0_hpd_init(adev);
 
-	/* blat the mode back in */
-	drm_helper_resume_force_mode(adev->ddev);
-	/* turn on display hw */
-	list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
-		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-	}
-
-	drm_kms_helper_poll_enable(adev->ddev);
-
 	return 0;
 }
 
@@ -120,10 +120,19 @@ static const u32 cz_golden_settings_a11[] =
 	mmFBC_MISC, 0x1f311fff, 0x14300000,
 };
 
+static const u32 cz_mgcg_cgcg_init[] =
+{
+	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+};
+
 static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
 	case CHIP_CARRIZO:
 		amdgpu_program_register_sequence(adev,
+						 cz_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
 						 cz_golden_settings_a11,
 						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
@@ -3006,16 +3015,8 @@ static int dce_v11_0_hw_fini(void *handle)
 
 static int dce_v11_0_suspend(void *handle)
 {
-	struct drm_connector *connector;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	drm_kms_helper_poll_disable(adev->ddev);
-
-	/* turn off display hw */
-	list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
-		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-	}
-
 	amdgpu_atombios_scratch_regs_save(adev);
 
 	dce_v11_0_hpd_fini(adev);
@@ -3025,7 +3026,6 @@ static int dce_v11_0_suspend(void *handle)
 
 static int dce_v11_0_resume(void *handle)
 {
-	struct drm_connector *connector;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	dce_v11_0_init_golden_registers(adev);
@@ -3047,15 +3047,6 @@ static int dce_v11_0_resume(void *handle)
 	/* initialize hpd */
 	dce_v11_0_hpd_init(adev);
 
-	/* blat the mode back in */
-	drm_helper_resume_force_mode(adev->ddev);
-	/* turn on display hw */
-	list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
-		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-	}
-
-	drm_kms_helper_poll_enable(adev->ddev);
-
 	return 0;
 }
 
@@ -2948,16 +2948,8 @@ static int dce_v8_0_hw_fini(void *handle)
 
 static int dce_v8_0_suspend(void *handle)
 {
-	struct drm_connector *connector;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	drm_kms_helper_poll_disable(adev->ddev);
-
-	/* turn off display hw */
-	list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
-		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-	}
-
 	amdgpu_atombios_scratch_regs_save(adev);
 
 	dce_v8_0_hpd_fini(adev);
@@ -2967,7 +2959,6 @@ static int dce_v8_0_suspend(void *handle)
 
 static int dce_v8_0_resume(void *handle)
 {
-	struct drm_connector *connector;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_atombios_scratch_regs_restore(adev);
@@ -2986,15 +2977,6 @@ static int dce_v8_0_resume(void *handle)
 	/* initialize hpd */
 	dce_v8_0_hpd_init(adev);
 
-	/* blat the mode back in */
-	drm_helper_resume_force_mode(adev->ddev);
-	/* turn on display hw */
-	list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
-		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-	}
-
-	drm_kms_helper_poll_enable(adev->ddev);
-
 	return 0;
 }
 
@@ -2414,8 +2414,10 @@ static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
  * GPU caches.
  */
 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
-					 u64 seq, bool write64bit)
+					 u64 seq, unsigned flags)
 {
+	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
 	/* Workaround for cache flush problems. First send a dummy EOP
 	 * event down the pipe with seq one below.
 	 */
@@ -2438,7 +2440,7 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
 				 EVENT_INDEX(5)));
 	amdgpu_ring_write(ring, addr & 0xfffffffc);
 	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
-				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(2));
+				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
 	amdgpu_ring_write(ring, lower_32_bits(seq));
 	amdgpu_ring_write(ring, upper_32_bits(seq));
 }
@@ -2454,15 +2456,18 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
  */
 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
 					     u64 addr, u64 seq,
-					     bool write64bits)
+					     unsigned flags)
 {
+	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+
 	/* RELEASE_MEM - flush caches, send int */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
 				 EOP_TC_ACTION_EN |
 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
 				 EVENT_INDEX(5)));
-	amdgpu_ring_write(ring, DATA_SEL(write64bits ? 2 : 1) | INT_SEL(2));
+	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
 	amdgpu_ring_write(ring, addr & 0xfffffffc);
 	amdgpu_ring_write(ring, upper_32_bits(addr));
 	amdgpu_ring_write(ring, lower_32_bits(seq));
@@ -2876,7 +2881,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
 	rb_bufsz = order_base_2(ring->ring_size / 8);
 	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
-	tmp |= BUF_SWAP_32BIT;
+	tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
 #endif
 	WREG32(mmCP_RB0_CNTL, tmp);
 
@@ -3395,7 +3400,8 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 		mqd->queue_state.cp_hqd_pq_control |=
 			(order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
 #ifdef __BIG_ENDIAN
-		mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
+		mqd->queue_state.cp_hqd_pq_control |=
+			2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
 #endif
 		mqd->queue_state.cp_hqd_pq_control &=
 			~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
@@ -116,11 +116,14 @@ static const u32 golden_settings_tonga_a11[] =
 	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
 	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
 	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 };
 
 static const u32 tonga_golden_common_all[] =
@@ -224,8 +227,10 @@ static const u32 golden_settings_iceland_a11[] =
 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
 	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
 	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
@@ -318,7 +323,9 @@ static const u32 cz_golden_settings_a11[] =
 	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 	mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 	mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
+	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
 };
@@ -1933,9 +1940,43 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
 	case CHIP_CARRIZO:
 		adev->gfx.config.max_shader_engines = 1;
 		adev->gfx.config.max_tile_pipes = 2;
-		adev->gfx.config.max_cu_per_sh = 8;
 		adev->gfx.config.max_sh_per_se = 1;
-		adev->gfx.config.max_backends_per_se = 2;
+
+		switch (adev->pdev->revision) {
+		case 0xc4:
+		case 0x84:
+		case 0xc8:
+		case 0xcc:
+			/* B10 */
+			adev->gfx.config.max_cu_per_sh = 8;
+			adev->gfx.config.max_backends_per_se = 2;
+			break;
+		case 0xc5:
+		case 0x81:
+		case 0x85:
+		case 0xc9:
+		case 0xcd:
+			/* B8 */
+			adev->gfx.config.max_cu_per_sh = 6;
+			adev->gfx.config.max_backends_per_se = 2;
+			break;
+		case 0xc6:
+		case 0xca:
+		case 0xce:
+			/* B6 */
+			adev->gfx.config.max_cu_per_sh = 6;
+			adev->gfx.config.max_backends_per_se = 2;
+			break;
+		case 0xc7:
+		case 0x87:
+		case 0xcb:
+		default:
+			/* B4 */
+			adev->gfx.config.max_cu_per_sh = 4;
+			adev->gfx.config.max_backends_per_se = 1;
+			break;
+		}
+
 		adev->gfx.config.max_texture_channel_caches = 2;
 		adev->gfx.config.max_gprs = 256;
 		adev->gfx.config.max_gs_threads = 32;
@@ -3713,8 +3754,11 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
 }
 
 static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
-					 u64 seq, bool write64bit)
+					 u64 seq, unsigned flags)
 {
+	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+
 	/* EVENT_WRITE_EOP - flush caches, send int */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
@@ -3723,7 +3767,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
 				 EVENT_INDEX(5)));
 	amdgpu_ring_write(ring, addr & 0xfffffffc);
 	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
-				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(2));
+				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
 	amdgpu_ring_write(ring, lower_32_bits(seq));
 	amdgpu_ring_write(ring, upper_32_bits(seq));
 }
@@ -3746,11 +3790,10 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
 	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
 	if (ring->adev->asic_type == CHIP_TOPAZ ||
-	    ring->adev->asic_type == CHIP_TONGA) {
-		amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
-		amdgpu_ring_write(ring, lower_32_bits(addr));
-		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
-	} else {
+	    ring->adev->asic_type == CHIP_TONGA)
+		/* we got a hw semaphore bug in VI TONGA, return false to switch back to sw fence wait */
+		return false;
+	else {
 		amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 2));
 		amdgpu_ring_write(ring, lower_32_bits(addr));
 		amdgpu_ring_write(ring, upper_32_bits(addr));
@@ -3880,15 +3923,18 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
 
 static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
 					     u64 addr, u64 seq,
-					     bool write64bits)
+					     unsigned flags)
 {
+	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+
 	/* RELEASE_MEM - flush caches, send int */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
 				 EOP_TC_ACTION_EN |
 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
 				 EVENT_INDEX(5)));
-	amdgpu_ring_write(ring, DATA_SEL(write64bits ? 2 : 1) | INT_SEL(2));
+	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
 	amdgpu_ring_write(ring, addr & 0xfffffffc);
 	amdgpu_ring_write(ring, upper_32_bits(addr));
 	amdgpu_ring_write(ring, lower_32_bits(seq));
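Annotation: the new Carrizo switch derives the CU count and backend count from the pci revision id (B10/B8/B6/B4 variants) instead of assuming the 8-CU config for every part. A stand-alone sketch that prints the config picked for a given revision (values copied from the hunk above):

	#include <stdio.h>

	int main(void)
	{
		unsigned revision = 0xc9; /* example: a B8 part */
		unsigned cu_per_sh, backends_per_se;

		switch (revision) {
		case 0xc4: case 0x84: case 0xc8: case 0xcc:            /* B10 */
			cu_per_sh = 8; backends_per_se = 2; break;
		case 0xc5: case 0x81: case 0x85: case 0xc9: case 0xcd: /* B8 */
			cu_per_sh = 6; backends_per_se = 2; break;
		case 0xc6: case 0xca: case 0xce:                       /* B6 */
			cu_per_sh = 6; backends_per_se = 2; break;
		default:                                               /* B4 */
			cu_per_sh = 4; backends_per_se = 1; break;
		}

		printf("rev 0x%02x: %u CUs/SH, %u backends/SE\n",
		       revision, cu_per_sh, backends_per_se);
		return 0;
	}
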
@@ -71,7 +71,7 @@ static const u32 golden_settings_iceland_a11[] =
 
 static const u32 iceland_mgcg_cgcg_init[] =
 {
-	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
 static const u32 cz_mgcg_cgcg_init[] =
@@ -82,6 +82,10 @@ static int iceland_dpm_hw_init(void *handle)
 
 	mutex_lock(&adev->pm.mutex);
 
+	/* smu init only needs to be called at startup, not resume.
+	 * It should be in sw_init, but requires the fw info gathered
+	 * in sw_init from other IP modules.
+	 */
 	ret = iceland_smu_init(adev);
 	if (ret) {
 		DRM_ERROR("SMU initialization failed\n");
@@ -108,6 +112,10 @@ static int iceland_dpm_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	mutex_lock(&adev->pm.mutex);
+	/* smu fini only needs to be called at teardown, not suspend.
+	 * It should be in sw_fini, but we put it here for symmetry
+	 * with smu init.
	 */
 	iceland_smu_fini(adev);
 	mutex_unlock(&adev->pm.mutex);
 	return 0;
@@ -115,20 +123,25 @@ static int iceland_dpm_hw_fini(void *handle)
 
 static int iceland_dpm_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	iceland_dpm_hw_fini(adev);
 
 	return 0;
 }
 
 static int iceland_dpm_resume(void *handle)
 {
+	int ret;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	iceland_dpm_hw_init(adev);
+	mutex_lock(&adev->pm.mutex);
 
-	return 0;
+	ret = iceland_smu_start(adev);
+	if (ret) {
+		DRM_ERROR("SMU start failed\n");
+		goto fail;
+	}
+
+fail:
+	mutex_unlock(&adev->pm.mutex);
+	return ret;
 }
 
 static int iceland_dpm_set_clockgating_state(void *handle,
@@ -292,8 +292,9 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
  * an interrupt if needed (VI).
  */
 static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-				      bool write64bits)
+				      unsigned flags)
 {
+	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 	/* write the fence */
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
 	amdgpu_ring_write(ring, lower_32_bits(addr));
@@ -301,7 +302,7 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
 	amdgpu_ring_write(ring, lower_32_bits(seq));
 
 	/* optionally write high bits as well */
-	if (write64bits) {
+	if (write64bit) {
 		addr += 4;
 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
 		amdgpu_ring_write(ring, lower_32_bits(addr));
@@ -347,8 +347,9 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
  * an interrupt if needed (VI).
  */
 static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-				      bool write64bits)
+				      unsigned flags)
 {
+	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
 	amdgpu_ring_write(ring, lower_32_bits(addr));
@@ -356,7 +357,7 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
 	amdgpu_ring_write(ring, lower_32_bits(seq));
 
 	/* optionally write high bits as well */
-	if (write64bits) {
+	if (write64bit) {
 		addr += 4;
 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
 		amdgpu_ring_write(ring, lower_32_bits(addr));
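Annotation: on the SDMA rings a 64-bit fence is simply two fence packets, the low dword at addr and, only when the 64BIT flag is set, the high dword at addr + 4. A stand-alone sketch of that layout (prints instead of emitting ring packets):

	#include <stdint.h>
	#include <stdio.h>

	#define AMDGPU_FENCE_FLAG_64BIT (1u << 0)

	static void sdma_fence_sketch(uint64_t addr, uint64_t seq, unsigned flags)
	{
		printf("fence packet: [0x%llx] = 0x%08x\n",
		       (unsigned long long)addr, (uint32_t)seq);
		if (flags & AMDGPU_FENCE_FLAG_64BIT) /* optionally write high bits */
			printf("fence packet: [0x%llx] = 0x%08x\n",
			       (unsigned long long)(addr + 4), (uint32_t)(seq >> 32));
	}

	int main(void)
	{
		sdma_fence_sketch(0x1000, 0x123456789abcull, AMDGPU_FENCE_FLAG_64BIT);
		return 0;
	}
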
@@ -81,6 +81,10 @@ static int tonga_dpm_hw_init(void *handle)
 
 	mutex_lock(&adev->pm.mutex);
 
+	/* smu init only needs to be called at startup, not resume.
+	 * It should be in sw_init, but requires the fw info gathered
+	 * in sw_init from other IP modules.
+	 */
 	ret = tonga_smu_init(adev);
 	if (ret) {
 		DRM_ERROR("SMU initialization failed\n");
@@ -107,6 +111,10 @@ static int tonga_dpm_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	mutex_lock(&adev->pm.mutex);
+	/* smu fini only needs to be called at teardown, not suspend.
+	 * It should be in sw_fini, but we put it here for symmetry
+	 * with smu init.
+	 */
 	tonga_smu_fini(adev);
 	mutex_unlock(&adev->pm.mutex);
 	return 0;
@@ -114,20 +122,25 @@ static int tonga_dpm_hw_fini(void *handle)
 
 static int tonga_dpm_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	tonga_dpm_hw_fini(adev);
 
 	return 0;
 }
 
 static int tonga_dpm_resume(void *handle)
 {
+	int ret;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	tonga_dpm_hw_init(adev);
+	mutex_lock(&adev->pm.mutex);
 
-	return 0;
+	ret = tonga_smu_start(adev);
+	if (ret) {
+		DRM_ERROR("SMU start failed\n");
+		goto fail;
+	}
+
+fail:
+	mutex_unlock(&adev->pm.mutex);
+	return ret;
 }
 
 static int tonga_dpm_set_clockgating_state(void *handle,
@@ -417,9 +417,9 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)
  * Write a fence and a trap command to the ring.
  */
 static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-				     bool write64bit)
+				     unsigned flags)
 {
-	WARN_ON(write64bit);
+	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 
 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 	amdgpu_ring_write(ring, seq);
@@ -461,9 +461,9 @@ static void uvd_v5_0_stop(struct amdgpu_device *adev)
  * Write a fence and a trap command to the ring.
  */
 static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-				     bool write64bit)
+				     unsigned flags)
 {
-	WARN_ON(write64bit);
+	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 
 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 	amdgpu_ring_write(ring, seq);
@@ -457,9 +457,9 @@ static void uvd_v6_0_stop(struct amdgpu_device *adev)
  * Write a fence and a trap command to the ring.
  */
 static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-				     bool write64bit)
+				     unsigned flags)
 {
-	WARN_ON(write64bit);
+	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 
 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 	amdgpu_ring_write(ring, seq);
@@ -173,8 +173,6 @@ static const u32 tonga_mgcg_cgcg_init[] =
 	mmPCIE_DATA, 0x000f0000, 0x00000000,
 	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
-	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
-	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
 };
@@ -193,8 +191,6 @@ static const u32 cz_mgcg_cgcg_init[] =
 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
 	mmPCIE_DATA, 0x000f0000, 0x00000000,
-	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
-	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
 };
@@ -1267,7 +1263,7 @@ static int vi_common_early_init(void *handle)
 	case CHIP_CARRIZO:
 		adev->has_uvd = true;
 		adev->cg_flags = 0;
-		adev->pg_flags = AMDGPU_PG_SUPPORT_UVD;
+		adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE;
 		adev->external_rev_id = adev->rev_id + 0x1;
 		if (amdgpu_smc_load_fw && smc_enabled)
 			adev->firmware.smu_load = true;
@@ -55,7 +55,7 @@
 #define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info)
 #define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata)
 #define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle)
-#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, union drm_amdgpu_gem_va)
+#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va)
 #define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
 #define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
 #define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
@@ -139,16 +139,19 @@ union drm_amdgpu_bo_list {
 #define AMDGPU_CTX_OP_FREE_CTX	2
 #define AMDGPU_CTX_OP_QUERY_STATE	3
 
-#define AMDGPU_CTX_OP_STATE_RUNNING	1
-
 /* GPU reset status */
 #define AMDGPU_CTX_NO_RESET		0
-#define AMDGPU_CTX_GUILTY_RESET	1 /* this the context caused it */
-#define AMDGPU_CTX_INNOCENT_RESET	2 /* some other context caused it */
-#define AMDGPU_CTX_UNKNOWN_RESET	3 /* unknown cause */
+/* this the context caused it */
+#define AMDGPU_CTX_GUILTY_RESET	1
+/* some other context caused it */
+#define AMDGPU_CTX_INNOCENT_RESET	2
+/* unknown cause */
+#define AMDGPU_CTX_UNKNOWN_RESET	3
 
 struct drm_amdgpu_ctx_in {
+	/** AMDGPU_CTX_OP_* */
 	uint32_t	op;
+	/** For future use, no flags defined so far */
 	uint32_t	flags;
 	uint32_t	ctx_id;
 	uint32_t	_pad;
@@ -161,6 +164,7 @@ union drm_amdgpu_ctx_out {
 	} alloc;
 
 	struct {
+		/** For future use, no flags defined so far */
 		uint64_t	flags;
 		/** Number of resets caused by this context so far. */
 		uint32_t	hangs;
@@ -187,7 +191,9 @@ union drm_amdgpu_ctx {
 struct drm_amdgpu_gem_userptr {
 	uint64_t	addr;
 	uint64_t	size;
+	/* AMDGPU_GEM_USERPTR_* */
 	uint32_t	flags;
+	/* Resulting GEM handle */
 	uint32_t	handle;
 };
 
@@ -219,23 +225,29 @@ struct drm_amdgpu_gem_userptr {
 
 /** The same structure is shared for input/output */
 struct drm_amdgpu_gem_metadata {
-	uint32_t	handle; /* GEM Object handle */
-	uint32_t	op; /** Do we want get or set metadata */
+	/** GEM Object handle */
+	uint32_t	handle;
+	/** Do we want get or set metadata */
+	uint32_t	op;
 	struct {
 		/** For future use, no flags defined so far */
 		uint64_t	flags;
-		uint64_t	tiling_info; /* family specific tiling info */
+		/** family specific tiling info */
+		uint64_t	tiling_info;
 		uint32_t	data_size_bytes;
 		uint32_t	data[64];
 	} data;
 };
 
 struct drm_amdgpu_gem_mmap_in {
-	uint32_t	handle; /** the GEM object handle */
+	/** the GEM object handle */
+	uint32_t	handle;
 	uint32_t	_pad;
 };
 
 struct drm_amdgpu_gem_mmap_out {
-	uint64_t	addr_ptr; /** mmap offset from the vma offset manager */
+	/** mmap offset from the vma offset manager */
+	uint64_t	addr_ptr;
 };
 
 union drm_amdgpu_gem_mmap {
@@ -244,14 +256,19 @@ union drm_amdgpu_gem_mmap {
 };
 
 struct drm_amdgpu_gem_wait_idle_in {
-	uint32_t	handle; /* GEM object handle */
+	/** GEM object handle */
+	uint32_t	handle;
+	/** For future use, no flags defined so far */
 	uint32_t	flags;
-	uint64_t	timeout; /* Timeout to wait. If 0 then returned immediately with the status */
+	/** Absolute timeout to wait */
+	uint64_t	timeout;
 };
 
 struct drm_amdgpu_gem_wait_idle_out {
-	uint32_t	status; /* BO status: 0 - BO is idle, 1 - BO is busy */
-	uint32_t	domain; /* Returned current memory domain */
+	/** BO status: 0 - BO is idle, 1 - BO is busy */
+	uint32_t	status;
+	/** Returned current memory domain */
+	uint32_t	domain;
 };
 
 union drm_amdgpu_gem_wait_idle {
@@ -260,7 +277,9 @@ union drm_amdgpu_gem_wait_idle {
 };
 
 struct drm_amdgpu_wait_cs_in {
+	/** Command submission handle */
 	uint64_t	handle;
+	/** Absolute timeout to wait */
 	uint64_t	timeout;
 	uint32_t	ip_type;
 	uint32_t	ip_instance;
@@ -269,6 +288,7 @@ struct drm_amdgpu_wait_cs_in {
 };
 
 struct drm_amdgpu_wait_cs_out {
+	/** CS status: 0 - CS completed, 1 - CS still busy */
 	uint64_t	status;
 };
 
@@ -277,23 +297,22 @@ union drm_amdgpu_wait_cs {
 	struct drm_amdgpu_wait_cs_out out;
 };
 
-/* Sets or returns a value associated with a buffer. */
-struct drm_amdgpu_gem_op {
-	uint32_t	handle; /* buffer */
-	uint32_t	op; /* AMDGPU_GEM_OP_* */
-	uint64_t	value; /* input or return value */
-};
-
 #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO	0
 #define AMDGPU_GEM_OP_SET_PLACEMENT		1
 
+/* Sets or returns a value associated with a buffer. */
+struct drm_amdgpu_gem_op {
+	/** GEM object handle */
+	uint32_t	handle;
+	/** AMDGPU_GEM_OP_* */
+	uint32_t	op;
+	/** Input or return value */
+	uint64_t	value;
+};
+
 #define AMDGPU_VA_OP_MAP			1
 #define AMDGPU_VA_OP_UNMAP			2
 
-#define AMDGPU_VA_RESULT_OK			0
-#define AMDGPU_VA_RESULT_ERROR			1
-#define AMDGPU_VA_RESULT_VA_INVALID_ALIGNMENT	2
-
 /* Mapping flags */
 /* readable mapping */
 #define AMDGPU_VM_PAGE_READABLE	(1 << 1)
@@ -302,33 +321,22 @@ struct drm_amdgpu_gem_op {
 /* executable mapping, new for VI */
 #define AMDGPU_VM_PAGE_EXECUTABLE	(1 << 3)
 
-struct drm_amdgpu_gem_va_in {
-	/* GEM object handle */
+struct drm_amdgpu_gem_va {
+	/** GEM object handle */
 	uint32_t handle;
 	uint32_t _pad;
-	/* map or unmap*/
+	/** AMDGPU_VA_OP_* */
 	uint32_t operation;
-	/* specify mapping flags */
+	/** AMDGPU_VM_PAGE_* */
 	uint32_t flags;
-	/* va address to assign . Must be correctly aligned.*/
+	/** va address to assign . Must be correctly aligned.*/
 	uint64_t va_address;
-	/* Specify offset inside of BO to assign. Must be correctly aligned.*/
+	/** Specify offset inside of BO to assign. Must be correctly aligned.*/
 	uint64_t offset_in_bo;
-	/* Specify mapping size. If 0 and offset is 0 then map the whole BO.*/
-	/* Must be correctly aligned. */
+	/** Specify mapping size. Must be correctly aligned. */
 	uint64_t map_size;
 };
 
-struct drm_amdgpu_gem_va_out {
-	uint32_t result;
-	uint32_t _pad;
-};
-
-union drm_amdgpu_gem_va {
-	struct drm_amdgpu_gem_va_in in;
-	struct drm_amdgpu_gem_va_out out;
-};
-
 #define AMDGPU_HW_IP_GFX          0
 #define AMDGPU_HW_IP_COMPUTE      1
 #define AMDGPU_HW_IP_DMA          2
@@ -340,6 +348,7 @@ union drm_amdgpu_gem_va {
 
 #define AMDGPU_CHUNK_ID_IB		0x01
 #define AMDGPU_CHUNK_ID_FENCE	0x02
+
 struct drm_amdgpu_cs_chunk {
 	uint32_t	chunk_id;
 	uint32_t	length_dw;
@@ -353,7 +362,7 @@ struct drm_amdgpu_cs_in {
 	uint32_t	bo_list_handle;
 	uint32_t	num_chunks;
 	uint32_t	_pad;
-	/* this points to uint64_t * which point to cs chunks */
+	/** this points to uint64_t * which point to cs chunks */
 	uint64_t	chunks;
 };
 
@@ -362,8 +371,8 @@ struct drm_amdgpu_cs_out {
 };
 
 union drm_amdgpu_cs {
-	struct drm_amdgpu_cs_in in;
-	struct drm_amdgpu_cs_out out;
+	struct drm_amdgpu_cs_in in;
+	struct drm_amdgpu_cs_out out;
 };
 
 /* Specify flags to be used for IB */
@@ -371,20 +380,23 @@ union drm_amdgpu_cs {
 /* This IB should be submitted to CE */
 #define AMDGPU_IB_FLAG_CE	(1<<0)
 
-/* GDS is used by this IB */
-#define AMDGPU_IB_FLAG_GDS	(1<<1)
-
 /* CE Preamble */
-#define AMDGPU_IB_FLAG_PREAMBLE (1<<2)
+#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)
 
 struct drm_amdgpu_cs_chunk_ib {
 	uint32_t _pad;
-	uint32_t flags; /* IB Flags */
-	uint64_t va_start; /* Virtual address to begin IB execution */
-	uint32_t ib_bytes; /* Size of submission */
-	uint32_t ip_type; /* HW IP to submit to */
-	uint32_t ip_instance; /* HW IP index of the same type to submit to */
-	uint32_t ring; /* Ring index to submit to */
+	/** AMDGPU_IB_FLAG_* */
+	uint32_t flags;
+	/** Virtual address to begin IB execution */
+	uint64_t va_start;
+	/** Size of submission */
+	uint32_t ib_bytes;
+	/** HW IP to submit to */
+	uint32_t ip_type;
+	/** HW IP index of the same type to submit to */
+	uint32_t ip_instance;
+	/** Ring index to submit to */
+	uint32_t ring;
 };
 
 struct drm_amdgpu_cs_chunk_fence {
@@ -479,23 +491,28 @@ struct drm_amdgpu_info {
 		/** AMDGPU_HW_IP_* */
 		uint32_t type;
 		/**
-		 * Index of the IP if there are more IPs of the same type.
-		 * Ignored by AMDGPU_INFO_HW_IP_COUNT.
+		 * Index of the IP if there are more IPs of the same
+		 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
 		 */
 		uint32_t ip_instance;
 	} query_hw_ip;
 
 	struct {
 		uint32_t dword_offset;
-		uint32_t count; /* number of registers to read */
+		/** number of registers to read */
+		uint32_t count;
 		uint32_t instance;
+		/** For future use, no flags defined so far */
 		uint32_t flags;
 	} read_mmr_reg;
 
 	struct {
 		/** AMDGPU_INFO_FW_* */
 		uint32_t fw_type;
-		/** Index of the IP if there are more IPs of the same type. */
+		/**
+		 * Index of the IP if there are more IPs of
+		 * the same type.
+		 */
 		uint32_t ip_instance;
 		/**
 		 * Index of the engine. Whether this is used depends
@@ -556,9 +573,10 @@ struct drm_amdgpu_info_device {
 	uint32_t family;
 	uint32_t num_shader_engines;
 	uint32_t num_shader_arrays_per_engine;
-	uint32_t gpu_counter_freq; /* in KHz */
-	uint64_t max_engine_clock; /* in KHz */
-	uint64_t max_memory_clock; /* in KHz */
+	/* in KHz */
+	uint32_t gpu_counter_freq;
+	uint64_t max_engine_clock;
+	uint64_t max_memory_clock;
 	/* cu information */
 	uint32_t cu_active_number;
 	uint32_t cu_ao_mask;
@@ -580,7 +598,7 @@ struct drm_amdgpu_info_device {
 	uint32_t gart_page_size;
 	/** constant engine ram size*/
 	uint32_t ce_ram_size;
-	/** video memory type infro*/
+	/** video memory type info*/
 	uint32_t vram_type;
 	/** video memory bit width*/
 	uint32_t vram_bit_width;
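Annotation: one knock-on effect worth noting in the uapi hunk above: with AMDGPU_IB_FLAG_GDS gone, AMDGPU_IB_FLAG_PREAMBLE moves from bit 2 to bit 1, leaving CE (bit 0) and PREAMBLE (bit 1) as the defined IB flags. A hedged user-space sketch of filling the commented-up IB chunk (struct layout copied from the hunk above; fill_ib_chunk is a made-up helper name):

	#include <stdint.h>
	#include <string.h>

	#define AMDGPU_IB_FLAG_CE       (1 << 0)
	#define AMDGPU_IB_FLAG_PREAMBLE (1 << 1) /* was (1 << 2) before this series */

	struct drm_amdgpu_cs_chunk_ib {
		uint32_t _pad;
		uint32_t flags;
		uint64_t va_start;
		uint32_t ib_bytes;
		uint32_t ip_type;
		uint32_t ip_instance;
		uint32_t ring;
	};

	/* fill an IB chunk pointing at ib_bytes of commands at GPU address va */
	static void fill_ib_chunk(struct drm_amdgpu_cs_chunk_ib *ib,
				  uint64_t va, uint32_t ib_bytes, uint32_t ip_type)
	{
		memset(ib, 0, sizeof(*ib));
		ib->flags = 0;         /* AMDGPU_IB_FLAG_* as needed */
		ib->va_start = va;
		ib->ib_bytes = ib_bytes;
		ib->ip_type = ip_type; /* e.g. AMDGPU_HW_IP_GFX */
	}

	int main(void)
	{
		struct drm_amdgpu_cs_chunk_ib ib;

		fill_ib_chunk(&ib, 0x400000, 256, 0 /* AMDGPU_HW_IP_GFX */);
		return ib.ib_bytes == 256 ? 0 : 1;
	}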