drm/amdgpu: add optional ring to *_hdp callbacks

This adds an optional ring to the invalidate_hdp and flush_hdp
callbacks. If no ring is specified, or the ring doesn't implement
emit_wreg, the HDP operation is done with the CPU; otherwise it is
performed by writing the register on the ring.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 698825653f (parent 3c9d1fde7f)
Author: Christian König, 2018-01-19 14:17:40 +01:00; committed by Alex Deucher
10 changed files with 81 additions and 36 deletions
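
Each per-ASIC implementation below follows the same dispatch pattern. As a condensed, illustrative sketch (not verbatim from any single file; the register name is the one used by the CIK/SI/VI variants):

static void example_flush_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		/* No usable ring: flush with the CPU via MMIO and read the
		 * register back so the write is posted before we continue.
		 */
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		/* Ring available: emit the register write on the ring so the
		 * flush happens as part of the command stream.
		 */
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}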

drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -1221,9 +1221,10 @@ struct amdgpu_asic_funcs {
/* get config memsize register */
u32 (*get_config_memsize)(struct amdgpu_device *adev);
/* flush hdp write queue */
void (*flush_hdp)(struct amdgpu_device *adev);
void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
/* invalidate hdp read cache */
void (*invalidate_hdp)(struct amdgpu_device *adev);
void (*invalidate_hdp)(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
};
/*
@@ -1367,7 +1368,7 @@ struct amdgpu_nbio_funcs {
u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
u32 (*get_rev_id)(struct amdgpu_device *adev);
void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
void (*hdp_flush)(struct amdgpu_device *adev);
void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
u32 (*get_memsize)(struct amdgpu_device *adev);
void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index);
@@ -1774,8 +1775,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_asic_flush_hdp(adev) (adev)->asic_funcs->flush_hdp((adev))
#define amdgpu_asic_invalidate_hdp(adev) (adev)->asic_funcs->invalidate_hdp((adev))
#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, pasid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (pasid), (addr))
#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
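
On the caller side the macros now take the ring as a second argument. A minimal usage sketch (the surrounding code is hypothetical and not part of this patch; all callers converted in this commit pass NULL):

	/* CPU-based update paths keep the previous behaviour by passing NULL. */
	mb();
	amdgpu_asic_flush_hdp(adev, NULL);

	/* A path that already emits commands on a ring can pass that ring; the
	 * flush/invalidate is then written into the command stream, provided
	 * the ring implements emit_wreg.
	 */
	amdgpu_asic_flush_hdp(adev, ring);
	amdgpu_asic_invalidate_hdp(adev, ring);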

drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c

@@ -247,7 +247,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
}
}
mb();
amdgpu_asic_flush_hdp(adev);
amdgpu_asic_flush_hdp(adev, NULL);
amdgpu_gmc_flush_gpu_tlb(adev, 0);
return 0;
}
@@ -330,7 +330,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
return r;
mb();
amdgpu_asic_flush_hdp(adev);
amdgpu_asic_flush_hdp(adev, NULL);
amdgpu_gmc_flush_gpu_tlb(adev, 0);
return 0;
}

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -854,7 +854,7 @@ restart:
if (vm->use_cpu_for_update) {
/* Flush HDP */
mb();
amdgpu_asic_flush_hdp(adev);
amdgpu_asic_flush_hdp(adev, NULL);
} else if (params.ib->length_dw == 0) {
amdgpu_job_free(job);
} else {
@@ -1436,7 +1436,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
if (vm->use_cpu_for_update) {
/* Flush HDP */
mb();
amdgpu_asic_flush_hdp(adev);
amdgpu_asic_flush_hdp(adev, NULL);
}
spin_lock(&vm->status_lock);

drivers/gpu/drm/amd/amdgpu/cik.c

@@ -1715,16 +1715,25 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
static void cik_flush_hdp(struct amdgpu_device *adev)
static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
if (!ring || !ring->funcs->emit_wreg) {
WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
} else {
amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
}
}
static void cik_invalidate_hdp(struct amdgpu_device *adev)
static void cik_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
WREG32(mmHDP_DEBUG0, 1);
RREG32(mmHDP_DEBUG0);
if (!ring || !ring->funcs->emit_wreg) {
WREG32(mmHDP_DEBUG0, 1);
RREG32(mmHDP_DEBUG0);
} else {
amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
}
}
static const struct amdgpu_asic_funcs cik_asic_funcs =

drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

@@ -1009,7 +1009,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
/* After HDP is initialized, flush HDP.*/
adev->nbio_funcs->hdp_flush(adev);
adev->nbio_funcs->hdp_flush(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;

drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c

@@ -53,9 +53,16 @@ static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
if (!ring || !ring->funcs->emit_wreg)
WREG32_SOC15_NO_KIQ(NBIO, 0,
mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL,
0);
else
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
}
static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)

drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c

@@ -53,9 +53,14 @@ static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
if (!ring || !ring->funcs->emit_wreg)
WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
else
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0);
}
static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)

drivers/gpu/drm/amd/amdgpu/si.c

@@ -1230,16 +1230,25 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
static void si_flush_hdp(struct amdgpu_device *adev)
static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
if (!ring || !ring->funcs->emit_wreg) {
WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
} else {
amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
}
}
static void si_invalidate_hdp(struct amdgpu_device *adev)
static void si_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
WREG32(mmHDP_DEBUG0, 1);
RREG32(mmHDP_DEBUG0);
if (!ring || !ring->funcs->emit_wreg) {
WREG32(mmHDP_DEBUG0, 1);
RREG32(mmHDP_DEBUG0);
} else {
amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
}
}
static const struct amdgpu_asic_funcs si_asic_funcs =

drivers/gpu/drm/amd/amdgpu/soc15.c

@@ -583,14 +583,19 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
return adev->nbio_funcs->get_rev_id(adev);
}
static void soc15_flush_hdp(struct amdgpu_device *adev)
static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
adev->nbio_funcs->hdp_flush(adev);
adev->nbio_funcs->hdp_flush(adev, ring);
}
static void soc15_invalidate_hdp(struct amdgpu_device *adev)
static void soc15_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
if (!ring || !ring->funcs->emit_wreg)
WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
else
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
static const struct amdgpu_asic_funcs soc15_asic_funcs =

drivers/gpu/drm/amd/amdgpu/vi.c

@@ -856,16 +856,25 @@ static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}
static void vi_flush_hdp(struct amdgpu_device *adev)
static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
if (!ring || !ring->funcs->emit_wreg) {
WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
} else {
amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
}
}
static void vi_invalidate_hdp(struct amdgpu_device *adev)
static void vi_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
WREG32(mmHDP_DEBUG0, 1);
RREG32(mmHDP_DEBUG0);
if (!ring || !ring->funcs->emit_wreg) {
WREG32(mmHDP_DEBUG0, 1);
RREG32(mmHDP_DEBUG0);
} else {
amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
}
}
static const struct amdgpu_asic_funcs vi_asic_funcs =