drm/amdgpu: move struct amdgpu_mc into amdgpu_gmc.h

And rename it to amdgpu_gmc as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Samuel Li <Samuel.Li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

parent: da320625de
commit: 770d13b19f

Changed files (under drivers/gpu/drm/amd):

amdgpu/amdgpu.h, amdgpu/amdgpu_amdkfd.c, amdgpu/amdgpu_cs.c,
amdgpu/amdgpu_device.c, amdgpu/amdgpu_fb.c, amdgpu/amdgpu_gart.c,
amdgpu/amdgpu_gmc.h (new), amdgpu/amdgpu_gtt_mgr.c, amdgpu/amdgpu_kms.c,
amdgpu/amdgpu_object.c, amdgpu/amdgpu_test.c, amdgpu/amdgpu_ttm.c,
amdgpu/amdgpu_vm.c, amdgpu/amdgpu_vram_mgr.c, amdgpu/ci_dpm.c,
amdgpu/cik_sdma.c, amdgpu/dce_v10_0.c, amdgpu/dce_v11_0.c,
amdgpu/dce_v6_0.c, amdgpu/dce_v8_0.c, amdgpu/dce_virtual.c,
amdgpu/gfx_v7_0.c, amdgpu/gfx_v8_0.c, amdgpu/gfx_v9_0.c,
amdgpu/gfxhub_v1_0.c, amdgpu/gmc_v6_0.c, amdgpu/gmc_v7_0.c,
amdgpu/gmc_v8_0.c, amdgpu/gmc_v9_0.c, amdgpu/mmhub_v1_0.c,
amdgpu/sdma_v2_4.c, amdgpu/sdma_v3_0.c, amdgpu/sdma_v4_0.c,
amdgpu/si_dma.c, amdgpu/si_dpm.c, display/amdgpu_dm
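The change is almost entirely mechanical: the struct definition leaves amdgpu.h for the new header amdgpu_gmc.h, the type is renamed from amdgpu_mc to amdgpu_gmc, the embedded member changes from adev->mc to adev->gmc, and every access site is updated to match. A minimal standalone C sketch of the pattern (simplified, hypothetical fields for illustration; the real struct and its users are in the diff below):

    #include <stdint.h>
    #include <stdio.h>

    /* After this commit the struct lives in amdgpu_gmc.h; before, it
     * was "struct amdgpu_mc" defined directly in amdgpu.h. Fields
     * trimmed down here for illustration only. */
    struct amdgpu_gmc {
            uint64_t gart_size;
            uint64_t real_vram_size;
    };

    struct amdgpu_device {
            struct amdgpu_gmc gmc;  /* was: struct amdgpu_mc mc; */
    };

    int main(void)
    {
            struct amdgpu_device adev = {
                    .gmc = { .gart_size = 512ULL * 1024 * 1024 },
            };

            /* Call sites change from adev->mc.<field> to adev->gmc.<field>. */
            printf("GART: %lluM\n",
                   (unsigned long long)(adev.gmc.gart_size >> 20));
            return 0;
    }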
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -68,6 +68,7 @@
 #include "amdgpu_vce.h"
 #include "amdgpu_vcn.h"
 #include "amdgpu_mn.h"
+#include "amdgpu_gmc.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
 #include "amdgpu_gart.h"
@@ -495,55 +496,6 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
-/*
- * VMHUB structures, functions & helpers
- */
-struct amdgpu_vmhub {
-        uint32_t        ctx0_ptb_addr_lo32;
-        uint32_t        ctx0_ptb_addr_hi32;
-        uint32_t        vm_inv_eng0_req;
-        uint32_t        vm_inv_eng0_ack;
-        uint32_t        vm_context0_cntl;
-        uint32_t        vm_l2_pro_fault_status;
-        uint32_t        vm_l2_pro_fault_cntl;
-};
-
-/*
- * GPU MC structures, functions & helpers
- */
-struct amdgpu_mc {
-        resource_size_t         aper_size;
-        resource_size_t         aper_base;
-        /* for some chips with <= 32MB we need to lie
-         * about vram size near mc fb location */
-        u64                     mc_vram_size;
-        u64                     visible_vram_size;
-        u64                     gart_size;
-        u64                     gart_start;
-        u64                     gart_end;
-        u64                     vram_start;
-        u64                     vram_end;
-        unsigned                vram_width;
-        u64                     real_vram_size;
-        int                     vram_mtrr;
-        u64                     mc_mask;
-        const struct firmware   *fw;    /* MC firmware */
-        uint32_t                fw_version;
-        struct amdgpu_irq_src   vm_fault;
-        uint32_t                vram_type;
-        uint32_t                srbm_soft_reset;
-        bool                    prt_warning;
-        uint64_t                stolen_size;
-        /* apertures */
-        u64                     shared_aperture_start;
-        u64                     shared_aperture_end;
-        u64                     private_aperture_start;
-        u64                     private_aperture_end;
-        /* protects concurrent invalidation */
-        spinlock_t              invalidate_lock;
-        bool                    translate_further;
-};
-
 /*
  * GPU doorbell structures, functions & helpers
  */
@@ -1579,7 +1531,7 @@ struct amdgpu_device {
        struct amdgpu_clock             clock;
 
        /* MC */
-       struct amdgpu_mc                mc;
+       struct amdgpu_gmc               gmc;
        struct amdgpu_gart              gart;
        struct amdgpu_dummy_page        dummy_page;
        struct amdgpu_vm_manager        vm_manager;
@@ -1908,9 +1860,9 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 void amdgpu_device_vram_location(struct amdgpu_device *adev,
-                                struct amdgpu_mc *mc, u64 base);
+                                struct amdgpu_gmc *mc, u64 base);
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
-                                struct amdgpu_mc *mc);
+                                struct amdgpu_gmc *mc);
 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
 int amdgpu_ttm_init(struct amdgpu_device *adev);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c

@@ -281,21 +281,21 @@ void get_local_mem_info(struct kgd_dev *kgd,
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
                                ~((1ULL << 32) - 1);
-       resource_size_t aper_limit = adev->mc.aper_base + adev->mc.aper_size;
+       resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;
 
        memset(mem_info, 0, sizeof(*mem_info));
-       if (!(adev->mc.aper_base & address_mask || aper_limit & address_mask)) {
-               mem_info->local_mem_size_public = adev->mc.visible_vram_size;
-               mem_info->local_mem_size_private = adev->mc.real_vram_size -
-                               adev->mc.visible_vram_size;
+       if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
+               mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
+               mem_info->local_mem_size_private = adev->gmc.real_vram_size -
+                               adev->gmc.visible_vram_size;
        } else {
                mem_info->local_mem_size_public = 0;
-               mem_info->local_mem_size_private = adev->mc.real_vram_size;
+               mem_info->local_mem_size_private = adev->gmc.real_vram_size;
        }
-       mem_info->vram_width = adev->mc.vram_width;
+       mem_info->vram_width = adev->gmc.vram_width;
 
        pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
-                &adev->mc.aper_base, &aper_limit,
+                &adev->gmc.aper_base, &aper_limit,
                 mem_info->local_mem_size_public,
                 mem_info->local_mem_size_private);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -257,7 +257,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
                return;
        }
 
-       total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
+       total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
        used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
        free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 
@@ -302,8 +302,8 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
        *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 
        /* Do the same for visible VRAM if half of it is free */
-       if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
-               u64 total_vis_vram = adev->mc.visible_vram_size;
+       if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size) {
+               u64 total_vis_vram = adev->gmc.visible_vram_size;
                u64 used_vis_vram =
                        amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
 
@@ -359,7 +359,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
         * to move it. Don't move anything if the threshold is zero.
         */
        if (p->bytes_moved < p->bytes_moved_threshold) {
-               if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+               if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
                    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
                        /* And don't move a CPU_ACCESS_REQUIRED BO to limited
                         * visible VRAM if we've depleted our allowance to do
@@ -381,9 +381,9 @@ retry:
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 
        p->bytes_moved += ctx.bytes_moved;
-       if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+       if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
            bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-           bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
+           bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
                p->bytes_moved_vis += ctx.bytes_moved;
 
        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -437,9 +437,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
                /* Good we can try to move this BO somewhere else */
                amdgpu_ttm_placement_from_domain(bo, other);
                update_bytes_moved_vis =
-                       adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+                       adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
                        bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-                       bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
+                       bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
                initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                bytes_moved = atomic64_read(&adev->num_bytes_moved) -
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -544,7 +544,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
  * as parameter.
  */
 void amdgpu_device_vram_location(struct amdgpu_device *adev,
-                                struct amdgpu_mc *mc, u64 base)
+                                struct amdgpu_gmc *mc, u64 base)
 {
        uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
 
@@ -570,11 +570,11 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev,
  * FIXME: when reducing GTT size align new size on power of 2.
  */
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
-                                struct amdgpu_mc *mc)
+                                struct amdgpu_gmc *mc)
 {
        u64 size_af, size_bf;
 
-       size_af = adev->mc.mc_mask - mc->vram_end;
+       size_af = adev->gmc.mc_mask - mc->vram_end;
        size_bf = mc->vram_start;
        if (size_bf > size_af) {
                if (mc->gart_size > size_bf) {
@@ -608,7 +608,7 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
  */
 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 {
-       u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size);
+       u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
        u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
        struct pci_bus *root;
        struct resource *res;
@@ -1768,7 +1768,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->flags = flags;
        adev->asic_type = flags & AMD_ASIC_MASK;
        adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
-       adev->mc.gart_size = 512 * 1024 * 1024;
+       adev->gmc.gart_size = 512 * 1024 * 1024;
        adev->accel_working = false;
        adev->num_rings = 0;
        adev->mman.buffer_funcs = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c

@@ -244,8 +244,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 
        info->fbops = &amdgpufb_ops;
 
-       tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
-       info->fix.smem_start = adev->mc.aper_base + tmp;
+       tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
+       info->fix.smem_start = adev->gmc.aper_base + tmp;
        info->fix.smem_len = amdgpu_bo_size(abo);
        info->screen_base = amdgpu_bo_kptr(abo);
        info->screen_size = amdgpu_bo_size(abo);
@@ -254,7 +254,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 
        /* setup aperture base/size for vesafb takeover */
        info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
-       info->apertures->ranges[0].size = adev->mc.aper_size;
+       info->apertures->ranges[0].size = adev->gmc.aper_size;
 
        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
@@ -264,7 +264,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
        }
 
        DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
-       DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base);
+       DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->gmc.aper_base);
        DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
        DRM_INFO("fb depth is %d\n", fb->format->depth);
        DRM_INFO("   pitch is %d\n", fb->pitches[0]);
@@ -321,7 +321,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
                return 0;
 
        /* select 8 bpp console on low vram cards */
-       if (adev->mc.real_vram_size <= (32*1024*1024))
+       if (adev->gmc.real_vram_size <= (32*1024*1024))
                bpp_sel = 8;
 
        rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c

@@ -359,8 +359,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
        if (r)
                return r;
        /* Compute table size */
-       adev->gart.num_cpu_pages = adev->mc.gart_size / PAGE_SIZE;
-       adev->gart.num_gpu_pages = adev->mc.gart_size / AMDGPU_GPU_PAGE_SIZE;
+       adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
+       adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
        DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
                 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
 
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h (new file, 84 lines)

@@ -0,0 +1,84 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+#ifndef __AMDGPU_GMC_H__
+#define __AMDGPU_GMC_H__
+
+#include <linux/types.h>
+
+#include "amdgpu_irq.h"
+
+struct firmware;
+
+/*
+ * VMHUB structures, functions & helpers
+ */
+struct amdgpu_vmhub {
+        uint32_t        ctx0_ptb_addr_lo32;
+        uint32_t        ctx0_ptb_addr_hi32;
+        uint32_t        vm_inv_eng0_req;
+        uint32_t        vm_inv_eng0_ack;
+        uint32_t        vm_context0_cntl;
+        uint32_t        vm_l2_pro_fault_status;
+        uint32_t        vm_l2_pro_fault_cntl;
+};
+
+/*
+ * GPU MC structures, functions & helpers
+ */
+struct amdgpu_gmc {
+        resource_size_t         aper_size;
+        resource_size_t         aper_base;
+        /* for some chips with <= 32MB we need to lie
+         * about vram size near mc fb location */
+        u64                     mc_vram_size;
+        u64                     visible_vram_size;
+        u64                     gart_size;
+        u64                     gart_start;
+        u64                     gart_end;
+        u64                     vram_start;
+        u64                     vram_end;
+        unsigned                vram_width;
+        u64                     real_vram_size;
+        int                     vram_mtrr;
+        u64                     mc_mask;
+        const struct firmware   *fw;    /* MC firmware */
+        uint32_t                fw_version;
+        struct amdgpu_irq_src   vm_fault;
+        uint32_t                vram_type;
+        uint32_t                srbm_soft_reset;
+        bool                    prt_warning;
+        uint64_t                stolen_size;
+        /* apertures */
+        u64                     shared_aperture_start;
+        u64                     shared_aperture_end;
+        u64                     private_aperture_start;
+        u64                     private_aperture_end;
+        /* protects concurrent invalidation */
+        spinlock_t              invalidate_lock;
+        bool                    translate_further;
+};
+
+#endif
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c

@@ -56,7 +56,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
                return -ENOMEM;
 
        start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
-       size = (adev->mc.gart_size >> PAGE_SHIFT) - start;
+       size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
        drm_mm_init(&mgr->mm, start, size);
        spin_lock_init(&mgr->lock);
        atomic64_set(&mgr->available, p_size);
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -191,7 +191,7 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
                fw_info->feature = 0;
                break;
        case AMDGPU_INFO_FW_GMC:
-               fw_info->ver = adev->mc.fw_version;
+               fw_info->ver = adev->gmc.fw_version;
                fw_info->feature = 0;
                break;
        case AMDGPU_INFO_FW_GFX_ME:
@@ -470,9 +470,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
        case AMDGPU_INFO_VRAM_GTT: {
                struct drm_amdgpu_info_vram_gtt vram_gtt;
 
-               vram_gtt.vram_size = adev->mc.real_vram_size;
+               vram_gtt.vram_size = adev->gmc.real_vram_size;
                vram_gtt.vram_size -= adev->vram_pin_size;
-               vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
+               vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
                vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
                vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
                vram_gtt.gtt_size *= PAGE_SIZE;
@@ -484,17 +484,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                struct drm_amdgpu_memory_info mem;
 
                memset(&mem, 0, sizeof(mem));
-               mem.vram.total_heap_size = adev->mc.real_vram_size;
+               mem.vram.total_heap_size = adev->gmc.real_vram_size;
                mem.vram.usable_heap_size =
-                       adev->mc.real_vram_size - adev->vram_pin_size;
+                       adev->gmc.real_vram_size - adev->vram_pin_size;
                mem.vram.heap_usage =
                        amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
                mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
 
                mem.cpu_accessible_vram.total_heap_size =
-                       adev->mc.visible_vram_size;
+                       adev->gmc.visible_vram_size;
                mem.cpu_accessible_vram.usable_heap_size =
-                       adev->mc.visible_vram_size -
+                       adev->gmc.visible_vram_size -
                        (adev->vram_pin_size - adev->invisible_pin_size);
                mem.cpu_accessible_vram.heap_usage =
                        amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
@@ -599,8 +599,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
                memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
                       sizeof(adev->gfx.cu_info.bitmap));
-               dev_info.vram_type = adev->mc.vram_type;
-               dev_info.vram_bit_width = adev->mc.vram_width;
+               dev_info.vram_type = adev->gmc.vram_type;
+               dev_info.vram_bit_width = adev->gmc.vram_width;
                dev_info.vce_harvest_config = adev->vce.harvest_config;
                dev_info.gc_double_offchip_lds_buf =
                        adev->gfx.config.double_offchip_lds_buf;
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -83,7 +83,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
        u32 c = 0;
 
        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
-               unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+               unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
 
                places[c].fpfn = 0;
                places[c].lpfn = 0;
@@ -103,7 +103,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                places[c].fpfn = 0;
                if (flags & AMDGPU_GEM_CREATE_SHADOW)
-                       places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT;
+                       places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
                else
                        places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_TT;
@@ -428,9 +428,9 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
        if (unlikely(r != 0))
                return r;
 
-       if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+       if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
            bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-           bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
+           bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
                                             ctx.bytes_moved);
        else
@@ -832,25 +832,25 @@ static const char *amdgpu_vram_names[] = {
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
        /* reserve PAT memory space to WC for VRAM */
-       arch_io_reserve_memtype_wc(adev->mc.aper_base,
-                                  adev->mc.aper_size);
+       arch_io_reserve_memtype_wc(adev->gmc.aper_base,
+                                  adev->gmc.aper_size);
 
        /* Add an MTRR for the VRAM */
-       adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
-                                             adev->mc.aper_size);
+       adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
+                                              adev->gmc.aper_size);
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
-                adev->mc.mc_vram_size >> 20,
-                (unsigned long long)adev->mc.aper_size >> 20);
+                adev->gmc.mc_vram_size >> 20,
+                (unsigned long long)adev->gmc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %s\n",
-                adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
+                adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
        return amdgpu_ttm_init(adev);
 }
 
 void amdgpu_bo_fini(struct amdgpu_device *adev)
 {
        amdgpu_ttm_fini(adev);
-       arch_phys_wc_del(adev->mc.vram_mtrr);
-       arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
+       arch_phys_wc_del(adev->gmc.vram_mtrr);
+       arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
 }
 
 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
@@ -980,7 +980,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
-       if ((offset + size) <= adev->mc.visible_vram_size)
+       if ((offset + size) <= adev->gmc.visible_vram_size)
                return 0;
 
        /* Can't move a pinned BO to visible VRAM */
@@ -1003,7 +1003,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
-           (offset + size) > adev->mc.visible_vram_size)
+           (offset + size) > adev->gmc.visible_vram_size)
                return -EINVAL;
 
        return 0;
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c

@@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
        /* Number of tests =
         * (Total GTT - IB pool - writeback page - ring buffers) / test size
         */
-       n = adev->mc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
+       n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                if (adev->rings[i])
                        n -= adev->rings[i]->ring_size;
@@ -142,10 +142,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                                  "0x%16llx/0x%16llx)\n",
                                  i, *vram_start, gart_start,
                                  (unsigned long long)
-                                 (gart_addr - adev->mc.gart_start +
+                                 (gart_addr - adev->gmc.gart_start +
                                   (void*)gart_start - gtt_map),
                                  (unsigned long long)
-                                 (vram_addr - adev->mc.vram_start +
+                                 (vram_addr - adev->gmc.vram_start +
                                   (void*)gart_start - gtt_map));
                        amdgpu_bo_kunmap(vram_obj);
                        goto out_lclean_unpin;
@@ -187,10 +187,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                                  "0x%16llx/0x%16llx)\n",
                                  i, *gart_start, vram_start,
                                  (unsigned long long)
-                                 (vram_addr - adev->mc.vram_start +
+                                 (vram_addr - adev->gmc.vram_start +
                                   (void*)vram_start - vram_map),
                                  (unsigned long long)
-                                 (gart_addr - adev->mc.gart_start +
+                                 (gart_addr - adev->gmc.gart_start +
                                   (void*)vram_start - vram_map));
                        amdgpu_bo_kunmap(gtt_obj[i]);
                        goto out_lclean_unpin;
@@ -200,7 +200,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                amdgpu_bo_kunmap(gtt_obj[i]);
 
                DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
-                        gart_addr - adev->mc.gart_start);
+                        gart_addr - adev->gmc.gart_start);
                continue;
 
 out_lclean_unpin:
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -161,7 +161,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                break;
        case TTM_PL_TT:
                man->func = &amdgpu_gtt_mgr_func;
-               man->gpu_offset = adev->mc.gart_start;
+               man->gpu_offset = adev->gmc.gart_start;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -169,7 +169,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &amdgpu_vram_mgr_func;
-               man->gpu_offset = adev->mc.vram_start;
+               man->gpu_offset = adev->gmc.vram_start;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
@@ -217,9 +217,9 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
                    adev->mman.buffer_funcs_ring &&
                    adev->mman.buffer_funcs_ring->ready == false) {
                        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
-               } else if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+               } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
                           !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
-                       unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+                       unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
                        struct drm_mm_node *node = bo->mem.mm_node;
                        unsigned long pages_left;
 
@@ -638,9 +638,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                /* check if it's visible */
-               if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
+               if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
                        return -EINVAL;
-               mem->bus.base = adev->mc.aper_base;
+               mem->bus.base = adev->gmc.aper_base;
                mem->bus.is_iomem = true;
                break;
        default:
@@ -891,7 +891,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements.fpfn = 0;
-       placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
+       placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
        placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
                TTM_PL_FLAG_TT;
 
@@ -1212,7 +1212,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
        nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
        pos = (nodes->start << PAGE_SHIFT) + offset;
 
-       while (len && pos < adev->mc.mc_vram_size) {
+       while (len && pos < adev->gmc.mc_vram_size) {
                uint64_t aligned_pos = pos & ~(uint64_t)3;
                uint32_t bytes = 4 - (pos & 3);
                uint32_t shift = (pos & 3) * 8;
@@ -1298,7 +1298,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
        struct ttm_operation_ctx ctx = { false, false };
        int r = 0;
        int i;
-       u64 vram_size = adev->mc.visible_vram_size;
+       u64 vram_size = adev->gmc.visible_vram_size;
        u64 offset = adev->fw_vram_usage.start_offset;
        u64 size = adev->fw_vram_usage.size;
        struct amdgpu_bo *bo;
@@ -1388,7 +1388,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        }
        adev->mman.initialized = true;
        r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
-                          adev->mc.real_vram_size >> PAGE_SHIFT);
+                          adev->gmc.real_vram_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
@@ -1397,11 +1397,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        /* Reduce size of CPU-visible VRAM if requested */
        vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
        if (amdgpu_vis_vram_limit > 0 &&
-           vis_vram_limit <= adev->mc.visible_vram_size)
-               adev->mc.visible_vram_size = vis_vram_limit;
+           vis_vram_limit <= adev->gmc.visible_vram_size)
+               adev->gmc.visible_vram_size = vis_vram_limit;
 
        /* Change the size here instead of the init above so only lpfn is affected */
-       amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+       amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
 
        /*
         *The reserved vram for firmware must be pinned to the specified
@@ -1412,21 +1412,21 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
                return r;
        }
 
-       r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
+       r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->stolen_vga_memory,
                                    NULL, NULL);
        if (r)
                return r;
        DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
-                (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
+                (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
 
        if (amdgpu_gtt_size == -1) {
                struct sysinfo si;
 
                si_meminfo(&si);
                gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
-                              adev->mc.mc_vram_size),
+                              adev->gmc.mc_vram_size),
                               ((uint64_t)si.totalram * si.mem_unit * 3/4));
        }
        else
@@ -1559,7 +1559,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
        BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
               AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
 
-       *addr = adev->mc.gart_start;
+       *addr = adev->gmc.gart_start;
        *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
                AMDGPU_GPU_PAGE_SIZE;
 
@@ -1811,14 +1811,14 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
-       if (*pos >= adev->mc.mc_vram_size)
+       if (*pos >= adev->gmc.mc_vram_size)
                return -ENXIO;
 
        while (size) {
                unsigned long flags;
                uint32_t value;
 
-               if (*pos >= adev->mc.mc_vram_size)
+               if (*pos >= adev->gmc.mc_vram_size)
                        return result;
 
                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
@@ -1850,14 +1850,14 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
-       if (*pos >= adev->mc.mc_vram_size)
+       if (*pos >= adev->gmc.mc_vram_size)
                return -ENXIO;
 
        while (size) {
                unsigned long flags;
                uint32_t value;
 
-               if (*pos >= adev->mc.mc_vram_size)
+               if (*pos >= adev->gmc.mc_vram_size)
                        return result;
 
                r = get_user(value, (uint32_t *)buf);
@@ -2001,9 +2001,9 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
                if (IS_ERR(ent))
                        return PTR_ERR(ent);
                if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
-                       i_size_write(ent->d_inode, adev->mc.mc_vram_size);
+                       i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
                else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
-                       i_size_write(ent->d_inode, adev->mc.gart_size);
+                       i_size_write(ent->d_inode, adev->gmc.gart_size);
                adev->mman.debugfs_entries[count] = ent;
        }
 
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -465,7 +465,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 
 static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
 {
-       return (adev->mc.real_vram_size == adev->mc.visible_vram_size);
+       return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
 }
 
 /**
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c

@@ -89,11 +89,11 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
        uint64_t start = node->start << PAGE_SHIFT;
        uint64_t end = (node->size + node->start) << PAGE_SHIFT;
 
-       if (start >= adev->mc.visible_vram_size)
+       if (start >= adev->gmc.visible_vram_size)
                return 0;
 
-       return (end > adev->mc.visible_vram_size ?
-               adev->mc.visible_vram_size : end) - start;
+       return (end > adev->gmc.visible_vram_size ?
+               adev->gmc.visible_vram_size : end) - start;
 }
 
 /**
drivers/gpu/drm/amd/amdgpu/ci_dpm.c

@@ -905,7 +905,7 @@ static bool ci_dpm_vblank_too_short(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
-       u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
+       u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
 
        /* disable mclk switching if the refresh is >120Hz, even if the
         * blanking period would allow it
@@ -2954,7 +2954,7 @@ static int ci_calculate_mclk_params(struct amdgpu_device *adev,
        mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
        mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
 
-       if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+       if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
                mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
                                       MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
                mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
@@ -3077,7 +3077,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
            (memory_clock <= pi->mclk_strobe_mode_threshold))
                memory_level->StrobeEnable = 1;
 
-       if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+       if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
                memory_level->StrobeRatio =
                        ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
                if (pi->mclk_edc_enable_threshold &&
@@ -3752,7 +3752,7 @@ static int ci_init_smc_table(struct amdgpu_device *adev)
        if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
 
-       if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+       if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
 
        if (ulv->supported) {
@@ -4549,12 +4549,12 @@ static int ci_set_mc_special_registers(struct amdgpu_device *adev,
                        for (k = 0; k < table->num_entries; k++) {
                                table->mc_reg_table_entry[k].mc_data[j] =
                                        (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-                               if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
+                               if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
                                        table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
                        }
                        j++;
 
-                       if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+                       if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
                                if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
                                        return -EINVAL;
                                table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -317,7 +317,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 
        if ((adev->mman.buffer_funcs_ring == sdma0) ||
            (adev->mman.buffer_funcs_ring == sdma1))
-               amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+               amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -517,7 +517,7 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
                }
 
                if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+                       amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
        }
 
        return 0;
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c

@@ -2824,7 +2824,7 @@ static int dce_v10_0_sw_init(void *handle)
        adev->ddev->mode_config.preferred_depth = 24;
        adev->ddev->mode_config.prefer_shadow = 1;
 
-       adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+       adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
        r = amdgpu_modeset_create_props(adev);
        if (r)
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c

@@ -2939,7 +2939,7 @@ static int dce_v11_0_sw_init(void *handle)
        adev->ddev->mode_config.preferred_depth = 24;
        adev->ddev->mode_config.prefer_shadow = 1;
 
-       adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+       adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
        r = amdgpu_modeset_create_props(adev);
        if (r)
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c

@@ -2693,7 +2693,7 @@ static int dce_v6_0_sw_init(void *handle)
        adev->ddev->mode_config.max_height = 16384;
        adev->ddev->mode_config.preferred_depth = 24;
        adev->ddev->mode_config.prefer_shadow = 1;
-       adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+       adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
        r = amdgpu_modeset_create_props(adev);
        if (r)
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c

@@ -2724,7 +2724,7 @@ static int dce_v8_0_sw_init(void *handle)
        adev->ddev->mode_config.preferred_depth = 24;
        adev->ddev->mode_config.prefer_shadow = 1;
 
-       adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+       adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
        r = amdgpu_modeset_create_props(adev);
        if (r)
drivers/gpu/drm/amd/amdgpu/dce_virtual.c

@@ -406,7 +406,7 @@ static int dce_virtual_sw_init(void *handle)
        adev->ddev->mode_config.preferred_depth = 24;
        adev->ddev->mode_config.prefer_shadow = 1;
 
-       adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+       adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
        r = amdgpu_modeset_create_props(adev);
        if (r)
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -1946,7 +1946,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
                if (i == 0)
                        sh_mem_base = 0;
                else
-                       sh_mem_base = adev->mc.shared_aperture_start >> 48;
+                       sh_mem_base = adev->gmc.shared_aperture_start >> 48;
                cik_srbm_select(adev, 0, 0, 0, i);
                /* CP and shaders */
                WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -3796,7 +3796,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
                tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                WREG32(mmSH_MEM_CONFIG, tmp);
-               tmp = adev->mc.shared_aperture_start >> 48;
+               tmp = adev->gmc.shared_aperture_start >> 48;
                WREG32(mmSH_MEM_BASES, tmp);
        }
 
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c

@@ -1539,7 +1539,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                        WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
-                       tmp = adev->mc.shared_aperture_start >> 48;
+                       tmp = adev->gmc.shared_aperture_start >> 48;
                        WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
                }
        }
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c

@@ -40,7 +40,7 @@ static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
        uint64_t value;
 
        BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
-       value = adev->gart.table_addr - adev->mc.vram_start
+       value = adev->gart.table_addr - adev->gmc.vram_start
                + adev->vm_manager.vram_base_offset;
        value &= 0x0000FFFFFFFFF000ULL;
        value |= 0x1; /*valid bit*/
@@ -57,14 +57,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
        gfxhub_v1_0_init_gart_pt_regs(adev);
 
        WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-                    (u32)(adev->mc.gart_start >> 12));
+                    (u32)(adev->gmc.gart_start >> 12));
        WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-                    (u32)(adev->mc.gart_start >> 44));
+                    (u32)(adev->gmc.gart_start >> 44));
 
        WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-                    (u32)(adev->mc.gart_end >> 12));
+                    (u32)(adev->gmc.gart_end >> 12));
        WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-                    (u32)(adev->mc.gart_end >> 44));
+                    (u32)(adev->gmc.gart_end >> 44));
 }
 
 static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -78,12 +78,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 
        /* Program the system aperture low logical page number. */
        WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-                    adev->mc.vram_start >> 18);
+                    adev->gmc.vram_start >> 18);
        WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-                    adev->mc.vram_end >> 18);
+                    adev->gmc.vram_end >> 18);
 
        /* Set default page address. */
-       value = adev->vram_scratch.gpu_addr - adev->mc.vram_start
+       value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
                + adev->vm_manager.vram_base_offset;
        WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
                     (u32)(value >> 12));
@@ -143,7 +143,7 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
        WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
 
        tmp = mmVM_L2_CNTL3_DEFAULT;
-       if (adev->mc.translate_further) {
+       if (adev->gmc.translate_further) {
                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
@@ -195,7 +195,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
 
        num_level = adev->vm_manager.num_level;
        block_size = adev->vm_manager.block_size;
-       if (adev->mc.translate_further)
+       if (adev->gmc.translate_further)
                num_level -= 1;
        else
                block_size -= 9;
@@ -257,9 +257,9 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
                 * SRIOV driver need to program them
                 */
                WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE,
-                            adev->mc.vram_start >> 24);
+                            adev->gmc.vram_start >> 24);
                WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP,
-                            adev->mc.vram_end >> 24);
+                            adev->gmc.vram_end >> 24);
        }
 
        /* GART Enable. */
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c

@@ -137,19 +137,19 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
                snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
        else
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
-       err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+       err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
        if (err)
                goto out;
 
-       err = amdgpu_ucode_validate(adev->mc.fw);
+       err = amdgpu_ucode_validate(adev->gmc.fw);
 
 out:
        if (err) {
                dev_err(adev->dev,
                       "si_mc: Failed to load firmware \"%s\"\n",
                       fw_name);
-               release_firmware(adev->mc.fw);
-               adev->mc.fw = NULL;
+               release_firmware(adev->gmc.fw);
+               adev->gmc.fw = NULL;
        }
        return err;
 }
@@ -162,20 +162,20 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
        int i, regs_size, ucode_size;
        const struct mc_firmware_header_v1_0 *hdr;
 
-       if (!adev->mc.fw)
+       if (!adev->gmc.fw)
                return -EINVAL;
 
-       hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+       hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 
        amdgpu_ucode_print_mc_hdr(&hdr->header);
 
-       adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+       adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
        regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
        new_io_mc_regs = (const __le32 *)
-               (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+               (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
        ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
        new_fw_data = (const __le32 *)
-               (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+               (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
        running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
 
@@ -218,12 +218,12 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
 }
 
 static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
-                                      struct amdgpu_mc *mc)
+                                      struct amdgpu_gmc *mc)
 {
        u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
        base <<= 24;
 
-       amdgpu_device_vram_location(adev, &adev->mc, base);
+       amdgpu_device_vram_location(adev, &adev->gmc, base);
        amdgpu_device_gart_location(adev, mc);
 }
 
@@ -260,9 +260,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
        }
        /* Update configuration */
        WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-              adev->mc.vram_start >> 12);
+              adev->gmc.vram_start >> 12);
        WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-              adev->mc.vram_end >> 12);
+              adev->gmc.vram_end >> 12);
        WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
               adev->vram_scratch.gpu_addr >> 12);
        WREG32(mmMC_VM_AGP_BASE, 0);
@@ -320,39 +320,39 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
                numchan = 16;
                break;
        }
-       adev->mc.vram_width = numchan * chansize;
+       adev->gmc.vram_width = numchan * chansize;
        /* size in MB on si */
-       adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
-       adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 
        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_device_resize_fb_bar(adev);
                if (r)
                        return r;
        }
-       adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
-       adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
-       adev->mc.visible_vram_size = adev->mc.aper_size;
+       adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+       adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+       adev->gmc.visible_vram_size = adev->gmc.aper_size;
 
        /* set the gart size */
        if (amdgpu_gart_size == -1) {
                switch (adev->asic_type) {
                case CHIP_HAINAN:    /* no MM engines */
                default:
-                       adev->mc.gart_size = 256ULL << 20;
+                       adev->gmc.gart_size = 256ULL << 20;
                        break;
                case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
                case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
                case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
                case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
-                       adev->mc.gart_size = 1024ULL << 20;
+                       adev->gmc.gart_size = 1024ULL << 20;
                        break;
                }
        } else {
-               adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+               adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
        }
 
-       gmc_v6_0_vram_gtt_location(adev, &adev->mc);
+       gmc_v6_0_vram_gtt_location(adev, &adev->gmc);
 
        return 0;
 }
@@ -431,9 +431,9 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
 {
        u32 tmp;
 
-       if (enable && !adev->mc.prt_warning) {
+       if (enable && !adev->gmc.prt_warning) {
                dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
-               adev->mc.prt_warning = true;
+               adev->gmc.prt_warning = true;
        }
 
        tmp = RREG32(mmVM_PRT_CNTL);
@@ -513,8 +513,8 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
               (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
               (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
        /* setup context0 */
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
        WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(adev->dummy_page.addr >> 12));
@@ -561,7 +561,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 
        gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
-                (unsigned)(adev->mc.gart_size >> 20),
+                (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)adev->gart.table_addr);
        adev->gart.ready = true;
        return 0;
@@ -804,7 +804,7 @@ static int gmc_v6_0_late_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
-               return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+               return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
        else
                return 0;
 }
@@ -816,26 +816,26 @@ static int gmc_v6_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->flags & AMD_IS_APU) {
-               adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+               adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
        } else {
                u32 tmp = RREG32(mmMC_SEQ_MISC0);
                tmp &= MC_SEQ_MISC0__MT__MASK;
-               adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
+               adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
        }
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
        if (r)
                return r;
 
        amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
 
-       adev->mc.mc_mask = 0xffffffffffULL;
+       adev->gmc.mc_mask = 0xffffffffffULL;
 
-       adev->mc.stolen_size = 256 * 1024;
+       adev->gmc.stolen_size = 256 * 1024;
 
        adev->need_dma32 = false;
        dma_bits = adev->need_dma32 ? 32 : 40;
@@ -900,8 +900,8 @@ static int gmc_v6_0_sw_fini(void *handle)
        amdgpu_vm_manager_fini(adev);
        gmc_v6_0_gart_fini(adev);
        amdgpu_bo_fini(adev);
-       release_firmware(adev->mc.fw);
-       adev->mc.fw = NULL;
+       release_firmware(adev->gmc.fw);
+       adev->gmc.fw = NULL;
 
        return 0;
 }
@@ -932,7 +932,7 @@ static int gmc_v6_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+       amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
        gmc_v6_0_gart_disable(adev);
 
        return 0;
@@ -1148,8 +1148,8 @@ static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
 
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->mc.vm_fault.num_types = 1;
-       adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
+       adev->gmc.vm_fault.num_types = 1;
+       adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -152,16 +152,16 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
        else
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
 
-       err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+       err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
        if (err)
                goto out;
-       err = amdgpu_ucode_validate(adev->mc.fw);
+       err = amdgpu_ucode_validate(adev->gmc.fw);
 
 out:
        if (err) {
                pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
-               release_firmware(adev->mc.fw);
-               adev->mc.fw = NULL;
+               release_firmware(adev->gmc.fw);
+               adev->gmc.fw = NULL;
        }
        return err;
 }
@@ -182,19 +182,19 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
        u32 running;
        int i, ucode_size, regs_size;
 
-       if (!adev->mc.fw)
+       if (!adev->gmc.fw)
                return -EINVAL;
 
-       hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+       hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
        amdgpu_ucode_print_mc_hdr(&hdr->header);
 
-       adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+       adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
        regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
        io_mc_regs = (const __le32 *)
-               (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+               (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
        ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
        fw_data = (const __le32 *)
-               (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+               (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
        running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 
@@ -236,12 +236,12 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
 }
 
 static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
-                                      struct amdgpu_mc *mc)
+                                      struct amdgpu_gmc *mc)
 {
        u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
        base <<= 24;
 
-       amdgpu_device_vram_location(adev, &adev->mc, base);
+       amdgpu_device_vram_location(adev, &adev->gmc, base);
        amdgpu_device_gart_location(adev, mc);
 }
 
@@ -284,9 +284,9 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
        }
        /* Update configuration */
        WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-              adev->mc.vram_start >> 12);
+              adev->gmc.vram_start >> 12);
        WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-              adev->mc.vram_end >> 12);
+              adev->gmc.vram_end >> 12);
        WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
               adev->vram_scratch.gpu_addr >> 12);
        WREG32(mmMC_VM_AGP_BASE, 0);
@@ -319,8 +319,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 {
        int r;
 
-       adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
-       if (!adev->mc.vram_width) {
+       adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
+       if (!adev->gmc.vram_width) {
                u32 tmp;
                int chansize, numchan;
 
@@ -362,38 +362,38 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
                        numchan = 16;
                        break;
                }
-               adev->mc.vram_width = numchan * chansize;
+               adev->gmc.vram_width = numchan * chansize;
        }
        /* size in MB on si */
-       adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
-       adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 
        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_device_resize_fb_bar(adev);
                if (r)
                        return r;
        }
-       adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
-       adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+       adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+       adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 #ifdef CONFIG_X86_64
        if (adev->flags & AMD_IS_APU) {
-               adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
-               adev->mc.aper_size = adev->mc.real_vram_size;
+               adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
+               adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
 #endif
 
        /* In case the PCI BAR is larger than the actual amount of vram */
-       adev->mc.visible_vram_size = adev->mc.aper_size;
-       if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
-               adev->mc.visible_vram_size = adev->mc.real_vram_size;
+       adev->gmc.visible_vram_size = adev->gmc.aper_size;
+       if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
+               adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 
        /* set the gart size */
        if (amdgpu_gart_size == -1) {
                switch (adev->asic_type) {
                case CHIP_TOPAZ:    /* no MM engines */
                default:
-                       adev->mc.gart_size = 256ULL << 20;
+                       adev->gmc.gart_size = 256ULL << 20;
                        break;
 #ifdef CONFIG_DRM_AMDGPU_CIK
                case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
@@ -401,15 +401,15 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
                case CHIP_KAVERI:  /* UVD, VCE do not support GPUVM */
                case CHIP_KABINI:  /* UVD, VCE do not support GPUVM */
                case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
-                       adev->mc.gart_size = 1024ULL << 20;
+                       adev->gmc.gart_size = 1024ULL << 20;
                        break;
 #endif
                }
        } else {
-               adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+               adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
        }
 
-       gmc_v7_0_vram_gtt_location(adev, &adev->mc);
+       gmc_v7_0_vram_gtt_location(adev, &adev->gmc);
 
        return 0;
 }
@@ -521,9 +521,9 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
 {
        uint32_t tmp;
 
-       if (enable && !adev->mc.prt_warning) {
+       if (enable && !adev->gmc.prt_warning) {
                dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
-               adev->mc.prt_warning = true;
+               adev->gmc.prt_warning = true;
        }
 
        tmp = RREG32(mmVM_PRT_CNTL);
@@ -619,8 +619,8 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
        WREG32(mmVM_L2_CNTL3, tmp);
        /* setup context0 */
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
        WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(adev->dummy_page.addr >> 12));
@@ -674,7 +674,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 
        gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-                (unsigned)(adev->mc.gart_size >> 20),
+                (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)adev->gart.table_addr);
        adev->gart.ready = true;
        return 0;
@@ -922,13 +922,13 @@ static int gmc_v7_0_early_init(void *handle)
        gmc_v7_0_set_gart_funcs(adev);
        gmc_v7_0_set_irq_funcs(adev);
 
-       adev->mc.shared_aperture_start = 0x2000000000000000ULL;
-       adev->mc.shared_aperture_end =
-               adev->mc.shared_aperture_start + (4ULL << 30) - 1;
-       adev->mc.private_aperture_start =
-               adev->mc.shared_aperture_end + 1;
-       adev->mc.private_aperture_end =
-               adev->mc.private_aperture_start + (4ULL << 30) - 1;
+       adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
+       adev->gmc.shared_aperture_end =
+               adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
+       adev->gmc.private_aperture_start =
+               adev->gmc.shared_aperture_end + 1;
+       adev->gmc.private_aperture_end =
+               adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 
        return 0;
 }
@@ -938,7 +938,7 @@ static int gmc_v7_0_late_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
-               return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+               return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
        else
                return 0;
 }
@@ -950,18 +950,18 @@ static int gmc_v7_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->flags & AMD_IS_APU) {
-               adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+               adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
|
||||
u32 tmp = RREG32(mmMC_SEQ_MISC0);
|
||||
tmp &= MC_SEQ_MISC0__MT__MASK;
|
||||
adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
|
||||
adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
|
||||
}
|
||||
|
||||
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
|
||||
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
|
||||
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@ -975,9 +975,9 @@ static int gmc_v7_0_sw_init(void *handle)
|
||||
* This is the max address of the GPU's
|
||||
* internal address space.
|
||||
*/
|
||||
adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
|
||||
adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
|
||||
|
||||
adev->mc.stolen_size = 256 * 1024;
|
||||
adev->gmc.stolen_size = 256 * 1024;
|
||||
|
||||
/* set DMA mask + need_dma32 flags.
|
||||
* PCIE - can handle 40-bits.
|
||||
@ -1048,8 +1048,8 @@ static int gmc_v7_0_sw_fini(void *handle)
|
||||
amdgpu_vm_manager_fini(adev);
|
||||
gmc_v7_0_gart_fini(adev);
|
||||
amdgpu_bo_fini(adev);
|
||||
release_firmware(adev->mc.fw);
|
||||
adev->mc.fw = NULL;
|
||||
release_firmware(adev->gmc.fw);
|
||||
adev->gmc.fw = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1082,7 +1082,7 @@ static int gmc_v7_0_hw_fini(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
|
||||
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
|
||||
gmc_v7_0_gart_disable(adev);
|
||||
|
||||
return 0;
|
||||
@ -1327,8 +1327,8 @@ static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
|
||||
|
||||
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
adev->mc.vm_fault.num_types = 1;
|
||||
adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
|
||||
adev->gmc.vm_fault.num_types = 1;
|
||||
adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
|
||||
|
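Note: every mc_init() variant touched by this commit repeats the same clamp between the PCI aperture and the VRAM actually present. As a standalone sketch (plain C, not driver code), the rule the hunks above preserve reduces to:

#include <stdint.h>

/* The resized BAR may be larger than the VRAM on the board, so the
 * CPU-visible VRAM size is the aperture size capped at real VRAM. */
static uint64_t visible_vram_size(uint64_t aper_size, uint64_t real_vram_size)
{
        return aper_size > real_vram_size ? real_vram_size : aper_size;
}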
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -236,16 +236,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
         }
 
         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
-        err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+        err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
         if (err)
                 goto out;
-        err = amdgpu_ucode_validate(adev->mc.fw);
+        err = amdgpu_ucode_validate(adev->gmc.fw);
 
 out:
         if (err) {
                 pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
-                release_firmware(adev->mc.fw);
-                adev->mc.fw = NULL;
+                release_firmware(adev->gmc.fw);
+                adev->gmc.fw = NULL;
         }
         return err;
 }
@@ -274,19 +274,19 @@ static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
         if (amdgpu_sriov_bios(adev))
                 return 0;
 
-        if (!adev->mc.fw)
+        if (!adev->gmc.fw)
                 return -EINVAL;
 
-        hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+        hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
         amdgpu_ucode_print_mc_hdr(&hdr->header);
 
-        adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+        adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
         regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
         io_mc_regs = (const __le32 *)
-                (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+                (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
         ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
         fw_data = (const __le32 *)
-                (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
         running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 
@@ -350,19 +350,19 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
         if (vbios_version == 0)
                 return 0;
 
-        if (!adev->mc.fw)
+        if (!adev->gmc.fw)
                 return -EINVAL;
 
-        hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+        hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
         amdgpu_ucode_print_mc_hdr(&hdr->header);
 
-        adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+        adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
         regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
         io_mc_regs = (const __le32 *)
-                (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+                (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
         ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
         fw_data = (const __le32 *)
-                (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
         data = RREG32(mmMC_SEQ_MISC0);
         data &= ~(0x40);
@@ -398,7 +398,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
 }
 
 static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
-                                       struct amdgpu_mc *mc)
+                                       struct amdgpu_gmc *mc)
 {
         u64 base = 0;
 
@@ -406,7 +406,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
                 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
         base <<= 24;
 
-        amdgpu_device_vram_location(adev, &adev->mc, base);
+        amdgpu_device_vram_location(adev, &adev->gmc, base);
         amdgpu_device_gart_location(adev, mc);
 }
 
@@ -449,18 +449,18 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
         }
         /* Update configuration */
         WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-               adev->mc.vram_start >> 12);
+               adev->gmc.vram_start >> 12);
         WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-               adev->mc.vram_end >> 12);
+               adev->gmc.vram_end >> 12);
         WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
                adev->vram_scratch.gpu_addr >> 12);
 
         if (amdgpu_sriov_vf(adev)) {
-                tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
-                tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+                tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
+                tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
                 WREG32(mmMC_VM_FB_LOCATION, tmp);
                 /* XXX double check these! */
-                WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+                WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
                 WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
                 WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
         }
@@ -495,8 +495,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 {
         int r;
 
-        adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
-        if (!adev->mc.vram_width) {
+        adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
+        if (!adev->gmc.vram_width) {
                 u32 tmp;
                 int chansize, numchan;
 
@@ -538,31 +538,31 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
                         numchan = 16;
                         break;
                 }
-                adev->mc.vram_width = numchan * chansize;
+                adev->gmc.vram_width = numchan * chansize;
         }
         /* size in MB on si */
-        adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
-        adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+        adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+        adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 
         if (!(adev->flags & AMD_IS_APU)) {
                 r = amdgpu_device_resize_fb_bar(adev);
                 if (r)
                         return r;
         }
-        adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
-        adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 #ifdef CONFIG_X86_64
         if (adev->flags & AMD_IS_APU) {
-                adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
-                adev->mc.aper_size = adev->mc.real_vram_size;
+                adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
+                adev->gmc.aper_size = adev->gmc.real_vram_size;
         }
 #endif
 
         /* In case the PCI BAR is larger than the actual amount of vram */
-        adev->mc.visible_vram_size = adev->mc.aper_size;
-        if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
-                adev->mc.visible_vram_size = adev->mc.real_vram_size;
+        adev->gmc.visible_vram_size = adev->gmc.aper_size;
+        if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
+                adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 
         /* set the gart size */
         if (amdgpu_gart_size == -1) {
@@ -571,20 +571,20 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
                 case CHIP_POLARIS10: /* all engines support GPUVM */
                 case CHIP_POLARIS12: /* all engines support GPUVM */
                 default:
-                        adev->mc.gart_size = 256ULL << 20;
+                        adev->gmc.gart_size = 256ULL << 20;
                         break;
                 case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
                 case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
                 case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
                 case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
-                        adev->mc.gart_size = 1024ULL << 20;
+                        adev->gmc.gart_size = 1024ULL << 20;
                         break;
                 }
         } else {
-                adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+                adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
         }
 
-        gmc_v8_0_vram_gtt_location(adev, &adev->mc);
+        gmc_v8_0_vram_gtt_location(adev, &adev->gmc);
 
         return 0;
 }
@@ -720,9 +720,9 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 {
         u32 tmp;
 
-        if (enable && !adev->mc.prt_warning) {
+        if (enable && !adev->gmc.prt_warning) {
                 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
-                adev->mc.prt_warning = true;
+                adev->gmc.prt_warning = true;
         }
 
         tmp = RREG32(mmVM_PRT_CNTL);
@@ -834,8 +834,8 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
         tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
         WREG32(mmVM_L2_CNTL4, tmp);
         /* setup context0 */
-        WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
-        WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
+        WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
+        WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
         WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
         WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                         (u32)(adev->dummy_page.addr >> 12));
@@ -890,7 +890,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 
         gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
         DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-                 (unsigned)(adev->mc.gart_size >> 20),
+                 (unsigned)(adev->gmc.gart_size >> 20),
                  (unsigned long long)adev->gart.table_addr);
         adev->gart.ready = true;
         return 0;
@@ -1012,13 +1012,13 @@ static int gmc_v8_0_early_init(void *handle)
         gmc_v8_0_set_gart_funcs(adev);
         gmc_v8_0_set_irq_funcs(adev);
 
-        adev->mc.shared_aperture_start = 0x2000000000000000ULL;
-        adev->mc.shared_aperture_end =
-                adev->mc.shared_aperture_start + (4ULL << 30) - 1;
-        adev->mc.private_aperture_start =
-                adev->mc.shared_aperture_end + 1;
-        adev->mc.private_aperture_end =
-                adev->mc.private_aperture_start + (4ULL << 30) - 1;
+        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
+        adev->gmc.shared_aperture_end =
+                adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
+        adev->gmc.private_aperture_start =
+                adev->gmc.shared_aperture_end + 1;
+        adev->gmc.private_aperture_end =
+                adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 
         return 0;
 }
@@ -1028,7 +1028,7 @@ static int gmc_v8_0_late_init(void *handle)
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
         if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
-                return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+                return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
         else
                 return 0;
 }
@@ -1042,7 +1042,7 @@ static int gmc_v8_0_sw_init(void *handle)
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
         if (adev->flags & AMD_IS_APU) {
-                adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+                adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
         } else {
                 u32 tmp;
 
@@ -1051,14 +1051,14 @@ static int gmc_v8_0_sw_init(void *handle)
                 else
                         tmp = RREG32(mmMC_SEQ_MISC0);
                 tmp &= MC_SEQ_MISC0__MT__MASK;
-                adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+                adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
         }
 
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
+        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
         if (r)
                 return r;
 
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
+        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
         if (r)
                 return r;
 
@@ -1072,9 +1072,9 @@ static int gmc_v8_0_sw_init(void *handle)
          * This is the max address of the GPU's
          * internal address space.
          */
-        adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+        adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
 
-        adev->mc.stolen_size = 256 * 1024;
+        adev->gmc.stolen_size = 256 * 1024;
 
         /* set DMA mask + need_dma32 flags.
          * PCIE - can handle 40-bits.
@@ -1146,8 +1146,8 @@ static int gmc_v8_0_sw_fini(void *handle)
         amdgpu_vm_manager_fini(adev);
         gmc_v8_0_gart_fini(adev);
         amdgpu_bo_fini(adev);
-        release_firmware(adev->mc.fw);
-        adev->mc.fw = NULL;
+        release_firmware(adev->gmc.fw);
+        adev->gmc.fw = NULL;
 
         return 0;
 }
@@ -1188,7 +1188,7 @@ static int gmc_v8_0_hw_fini(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-        amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
         gmc_v8_0_gart_disable(adev);
 
         return 0;
@@ -1268,10 +1268,10 @@ static bool gmc_v8_0_check_soft_reset(void *handle)
                                                 SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
         }
         if (srbm_soft_reset) {
-                adev->mc.srbm_soft_reset = srbm_soft_reset;
+                adev->gmc.srbm_soft_reset = srbm_soft_reset;
                 return true;
         } else {
-                adev->mc.srbm_soft_reset = 0;
+                adev->gmc.srbm_soft_reset = 0;
                 return false;
         }
 }
@@ -1280,7 +1280,7 @@ static int gmc_v8_0_pre_soft_reset(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-        if (!adev->mc.srbm_soft_reset)
+        if (!adev->gmc.srbm_soft_reset)
                 return 0;
 
         gmc_v8_0_mc_stop(adev);
@@ -1296,9 +1296,9 @@ static int gmc_v8_0_soft_reset(void *handle)
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
         u32 srbm_soft_reset;
 
-        if (!adev->mc.srbm_soft_reset)
+        if (!adev->gmc.srbm_soft_reset)
                 return 0;
-        srbm_soft_reset = adev->mc.srbm_soft_reset;
+        srbm_soft_reset = adev->gmc.srbm_soft_reset;
 
         if (srbm_soft_reset) {
                 u32 tmp;
@@ -1326,7 +1326,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-        if (!adev->mc.srbm_soft_reset)
+        if (!adev->gmc.srbm_soft_reset)
                 return 0;
 
         gmc_v8_0_mc_resume(adev);
@@ -1661,8 +1661,8 @@ static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
 
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-        adev->mc.vm_fault.num_types = 1;
-        adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
+        adev->gmc.vm_fault.num_types = 1;
+        adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
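Note: the SR-IOV path of gmc_v8_0_mc_program() above packs the framebuffer window into one 32-bit register at 16 MiB granularity, top in the upper 16 bits and base in the lower 16. A minimal standalone sketch of that packing (hypothetical helper name, not a driver API):

#include <stdint.h>

/* MC_VM_FB_LOCATION layout as programmed above: both addresses are
 * shifted by 24 (16 MiB units) and truncated to 16 bits. */
static uint32_t pack_fb_location(uint64_t vram_start, uint64_t vram_end)
{
        uint32_t tmp = (uint32_t)((vram_end >> 24) & 0xFFFF) << 16;

        tmp |= (uint32_t)((vram_start >> 24) & 0xFFFF);
        return tmp;
}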
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -285,8 +285,8 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
 
 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-        adev->mc.vm_fault.num_types = 1;
-        adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
+        adev->gmc.vm_fault.num_types = 1;
+        adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 }
 
 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
@@ -330,7 +330,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
         const unsigned eng = 17;
         unsigned i, j;
 
-        spin_lock(&adev->mc.invalidate_lock);
+        spin_lock(&adev->gmc.invalidate_lock);
 
         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                 struct amdgpu_vmhub *hub = &adev->vmhub[i];
@@ -363,7 +363,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
                         DRM_ERROR("Timeout waiting for VM flush ACK!\n");
         }
 
-        spin_unlock(&adev->mc.invalidate_lock);
+        spin_unlock(&adev->gmc.invalidate_lock);
 }
 
 /**
@@ -472,10 +472,10 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
 {
         if (!(*flags & AMDGPU_PDE_PTE))
                 *addr = adev->vm_manager.vram_base_offset + *addr -
-                        adev->mc.vram_start;
+                        adev->gmc.vram_start;
         BUG_ON(*addr & 0xFFFF00000000003FULL);
 
-        if (!adev->mc.translate_further)
+        if (!adev->gmc.translate_further)
                 return;
 
         if (level == AMDGPU_VM_PDB1) {
@@ -512,13 +512,13 @@ static int gmc_v9_0_early_init(void *handle)
         gmc_v9_0_set_gart_funcs(adev);
         gmc_v9_0_set_irq_funcs(adev);
 
-        adev->mc.shared_aperture_start = 0x2000000000000000ULL;
-        adev->mc.shared_aperture_end =
-                adev->mc.shared_aperture_start + (4ULL << 30) - 1;
-        adev->mc.private_aperture_start =
-                adev->mc.shared_aperture_end + 1;
-        adev->mc.private_aperture_end =
-                adev->mc.private_aperture_start + (4ULL << 30) - 1;
+        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
+        adev->gmc.shared_aperture_end =
+                adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
+        adev->gmc.private_aperture_start =
+                adev->gmc.shared_aperture_end + 1;
+        adev->gmc.private_aperture_end =
+                adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 
         return 0;
 }
@@ -644,16 +644,16 @@ static int gmc_v9_0_late_init(void *handle)
                 }
         }
 
-        return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+        return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 }
 
 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
-                                       struct amdgpu_mc *mc)
+                                       struct amdgpu_gmc *mc)
 {
         u64 base = 0;
         if (!amdgpu_sriov_vf(adev))
                 base = mmhub_v1_0_get_fb_location(adev);
-        amdgpu_device_vram_location(adev, &adev->mc, base);
+        amdgpu_device_vram_location(adev, &adev->gmc, base);
         amdgpu_device_gart_location(adev, mc);
         /* base offset of vram pages */
         if (adev->flags & AMD_IS_APU)
@@ -677,8 +677,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
         int chansize, numchan;
         int r;
 
-        adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
-        if (!adev->mc.vram_width) {
+        adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
+        if (!adev->gmc.vram_width) {
                 /* hbm memory channel size */
                 chansize = 128;
 
@@ -715,43 +715,43 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
                         numchan = 2;
                         break;
                 }
-                adev->mc.vram_width = numchan * chansize;
+                adev->gmc.vram_width = numchan * chansize;
         }
 
         /* size in MB on si */
-        adev->mc.mc_vram_size =
+        adev->gmc.mc_vram_size =
                 adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
-        adev->mc.real_vram_size = adev->mc.mc_vram_size;
+        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 
         if (!(adev->flags & AMD_IS_APU)) {
                 r = amdgpu_device_resize_fb_bar(adev);
                 if (r)
                         return r;
         }
-        adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
-        adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
         /* In case the PCI BAR is larger than the actual amount of vram */
-        adev->mc.visible_vram_size = adev->mc.aper_size;
-        if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
-                adev->mc.visible_vram_size = adev->mc.real_vram_size;
+        adev->gmc.visible_vram_size = adev->gmc.aper_size;
+        if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
+                adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 
         /* set the gart size */
         if (amdgpu_gart_size == -1) {
                 switch (adev->asic_type) {
                 case CHIP_VEGA10:  /* all engines support GPUVM */
                 default:
-                        adev->mc.gart_size = 256ULL << 20;
+                        adev->gmc.gart_size = 256ULL << 20;
                         break;
                 case CHIP_RAVEN:   /* DCE SG support */
-                        adev->mc.gart_size = 1024ULL << 20;
+                        adev->gmc.gart_size = 1024ULL << 20;
                         break;
                 }
         } else {
-                adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+                adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
         }
 
-        gmc_v9_0_vram_gtt_location(adev, &adev->mc);
+        gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
 
         return 0;
 }
@@ -783,23 +783,23 @@ static int gmc_v9_0_sw_init(void *handle)
         gfxhub_v1_0_init(adev);
         mmhub_v1_0_init(adev);
 
-        spin_lock_init(&adev->mc.invalidate_lock);
+        spin_lock_init(&adev->gmc.invalidate_lock);
 
         switch (adev->asic_type) {
         case CHIP_RAVEN:
-                adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+                adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
                 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                 } else {
                         /* vm_size is 128TB + 512GB for legacy 3-level page support */
                         amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
-                        adev->mc.translate_further =
+                        adev->gmc.translate_further =
                                 adev->vm_manager.num_level > 1;
                 }
                 break;
         case CHIP_VEGA10:
                 /* XXX Don't know how to get VRAM type yet. */
-                adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+                adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
                 /*
                  * To fulfill 4-level page support,
                  * vm size is 256TB (48bit), maximum size of Vega10,
@@ -813,9 +813,9 @@ static int gmc_v9_0_sw_init(void *handle)
 
         /* This interrupt is VMC page fault.*/
         r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
-                                &adev->mc.vm_fault);
+                                &adev->gmc.vm_fault);
         r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
-                                &adev->mc.vm_fault);
+                                &adev->gmc.vm_fault);
 
         if (r)
                 return r;
@@ -824,13 +824,13 @@ static int gmc_v9_0_sw_init(void *handle)
          * This is the max address of the GPU's
          * internal address space.
          */
-        adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
+        adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
 
         /*
          * It needs to reserve 8M stolen memory for vega10
          * TODO: Figure out how to avoid that...
          */
-        adev->mc.stolen_size = 8 * 1024 * 1024;
+        adev->gmc.stolen_size = 8 * 1024 * 1024;
 
         /* set DMA mask + need_dma32 flags.
          * PCIE - can handle 44-bits.
@@ -984,7 +984,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
         gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
 
         DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-                 (unsigned)(adev->mc.gart_size >> 20),
+                 (unsigned)(adev->gmc.gart_size >> 20),
                  (unsigned long long)adev->gart.table_addr);
         adev->gart.ready = true;
         return 0;
@@ -1035,7 +1035,7 @@ static int gmc_v9_0_hw_fini(void *handle)
                 return 0;
         }
 
-        amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
         gmc_v9_0_gart_disable(adev);
 
         return 0;
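Note: the early_init() hunks in all three GMC generations lay out the shared and private GPUVM apertures as two adjacent 4 GiB windows starting at 0x2000000000000000. A standalone C sketch that only reproduces and prints that arithmetic (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Same arithmetic as the early_init() hunks above. */
        uint64_t shared_start  = 0x2000000000000000ULL;
        uint64_t shared_end    = shared_start + (4ULL << 30) - 1;
        uint64_t private_start = shared_end + 1;
        uint64_t private_end   = private_start + (4ULL << 30) - 1;

        printf("shared : 0x%016llx-0x%016llx\n",
               (unsigned long long)shared_start,
               (unsigned long long)shared_end);
        printf("private: 0x%016llx-0x%016llx\n",
               (unsigned long long)private_start,
               (unsigned long long)private_end);
        return 0;
}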
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -50,7 +50,7 @@ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
         uint64_t value;
 
         BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
-        value = adev->gart.table_addr - adev->mc.vram_start +
+        value = adev->gart.table_addr - adev->gmc.vram_start +
                 adev->vm_manager.vram_base_offset;
         value &= 0x0000FFFFFFFFF000ULL;
         value |= 0x1; /* valid bit */
@@ -67,14 +67,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
         mmhub_v1_0_init_gart_pt_regs(adev);
 
         WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-                     (u32)(adev->mc.gart_start >> 12));
+                     (u32)(adev->gmc.gart_start >> 12));
         WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-                     (u32)(adev->mc.gart_start >> 44));
+                     (u32)(adev->gmc.gart_start >> 44));
 
         WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-                     (u32)(adev->mc.gart_end >> 12));
+                     (u32)(adev->gmc.gart_end >> 12));
         WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-                     (u32)(adev->mc.gart_end >> 44));
+                     (u32)(adev->gmc.gart_end >> 44));
 }
 
 static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -89,12 +89,12 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 
         /* Program the system aperture low logical page number. */
         WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-                     adev->mc.vram_start >> 18);
+                     adev->gmc.vram_start >> 18);
         WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-                     adev->mc.vram_end >> 18);
+                     adev->gmc.vram_end >> 18);
 
         /* Set default page address. */
-        value = adev->vram_scratch.gpu_addr - adev->mc.vram_start +
+        value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
                 adev->vm_manager.vram_base_offset;
         WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
                      (u32)(value >> 12));
@@ -155,7 +155,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
         tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
         WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
 
-        if (adev->mc.translate_further) {
+        if (adev->gmc.translate_further) {
                 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
                 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
                                     L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
@@ -207,7 +207,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
 
         num_level = adev->vm_manager.num_level;
         block_size = adev->vm_manager.block_size;
-        if (adev->mc.translate_further)
+        if (adev->gmc.translate_further)
                 num_level -= 1;
         else
                 block_size -= 9;
@@ -499,9 +499,9 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
                  * SRIOV driver need to program them
                  */
                 WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
-                             adev->mc.vram_start >> 24);
+                             adev->gmc.vram_start >> 24);
                 WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
-                             adev->mc.vram_end >> 24);
+                             adev->gmc.vram_end >> 24);
         }
 
         /* GART Enable. */
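Note: mmhub_v1_0_init_gart_aperture_regs() above writes each 48-bit GART boundary as a LO32/HI32 register pair of its 4 KiB page frame number: bits 12..43 go into the low register, bits 44 and up into the high one. A minimal sketch of the split (hypothetical helper, not the driver's API):

#include <stdint.h>

static void split_gpu_addr(uint64_t addr, uint32_t *lo32, uint32_t *hi32)
{
        *lo32 = (uint32_t)(addr >> 12); /* low 32 bits of the 4K PFN */
        *hi32 = (uint32_t)(addr >> 44); /* remaining high bits */
}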
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -346,7 +346,7 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 
         if ((adev->mman.buffer_funcs_ring == sdma0) ||
             (adev->mman.buffer_funcs_ring == sdma1))
-                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+                amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
 
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -491,7 +491,7 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
                 }
 
                 if (adev->mman.buffer_funcs_ring == ring)
-                        amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+                        amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
         }
 
         return 0;
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -518,7 +518,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 
         if ((adev->mman.buffer_funcs_ring == sdma0) ||
             (adev->mman.buffer_funcs_ring == sdma1))
-                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+                amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
 
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -758,7 +758,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
                 }
 
                 if (adev->mman.buffer_funcs_ring == ring)
-                        amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+                        amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
         }
 
         return 0;
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -440,7 +440,7 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 
         if ((adev->mman.buffer_funcs_ring == sdma0) ||
             (adev->mman.buffer_funcs_ring == sdma1))
-                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+                amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
 
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -682,7 +682,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
                 }
 
                 if (adev->mman.buffer_funcs_ring == ring)
-                        amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+                        amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
 
         }
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -134,7 +134,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
                 WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
 
                 if (adev->mman.buffer_funcs_ring == ring)
-                        amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+                        amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
                 ring->ready = false;
         }
 }
@@ -197,7 +197,7 @@ static int si_dma_start(struct amdgpu_device *adev)
                 }
 
                 if (adev->mman.buffer_funcs_ring == ring)
-                        amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+                        amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
         }
 
         return 0;
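Note: the sdma_v2_4/v3_0/v4_0 and si_dma hunks all preserve one pattern: while the buffer-move ring is stopped, TTM is restricted to CPU-visible VRAM (the CPU has to do the moves itself), and the full VRAM size is restored once the ring runs again. A standalone sketch with hypothetical stand-ins for the adev fields and amdgpu_ttm_set_active_vram_size():

#include <stdint.h>

struct gpu {
        uint64_t visible_vram_size; /* CPU-visible part of the BAR */
        uint64_t real_vram_size;    /* all VRAM on the board */
        uint64_t active_vram_size;  /* what buffers may currently use */
};

static void set_active_vram_size(struct gpu *g, uint64_t size)
{
        g->active_vram_size = size;
}

static void dma_engine_stop(struct gpu *g)
{
        set_active_vram_size(g, g->visible_vram_size); /* CPU does the moves */
}

static void dma_engine_start(struct gpu *g)
{
        set_active_vram_size(g, g->real_vram_size); /* GPU can move again */
}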
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3064,7 +3064,7 @@ static bool si_dpm_vblank_too_short(void *handle)
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
         u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
         /* we never hit the non-gddr5 limit so disable it */
-        u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
+        u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
 
         if (vblank_time < switch_limit)
                 return true;
@@ -4350,7 +4350,7 @@ static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
         if (mclk <= pi->mclk_strobe_mode_threshold)
                 strobe_mode = true;
 
-        if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+        if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
                 result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
         else
                 result = si_get_ddr3_mclk_frequency_ratio(mclk);
@@ -4937,7 +4937,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
         table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
         table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
 
-        if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+        if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
                 table->initialState.levels[0].strobeMode =
                         si_get_strobe_mode_settings(adev,
                                                     initial_state->performance_levels[0].mclk);
@@ -5208,7 +5208,7 @@ static int si_init_smc_table(struct amdgpu_device *adev)
         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
                 table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
 
-        if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+        if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
                 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
 
         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
@@ -5385,7 +5385,7 @@ static int si_populate_mclk_value(struct amdgpu_device *adev,
         mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
         mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
 
-        if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+        if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
                 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
                 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
                         YCLK_POST_DIV(mpll_param.post_div);
@@ -5397,7 +5397,7 @@ static int si_populate_mclk_value(struct amdgpu_device *adev,
                 u32 tmp;
                 u32 reference_clock = adev->clock.mpll.reference_freq;
 
-                if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+                if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
                         freq_nom = memory_clock * 4;
                 else
                         freq_nom = memory_clock * 2;
@@ -5489,7 +5489,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
                 level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
         }
 
-        if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+        if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
                 if (pl->mclk > pi->mclk_edc_enable_threshold)
                         level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
 
@@ -5860,12 +5860,12 @@ static int si_set_mc_special_registers(struct amdgpu_device *adev,
                                 table->mc_reg_table_entry[k].mc_data[j] =
                                         (temp_reg & 0xffff0000) |
                                         (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-                                if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
+                                if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
                                         table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
                         }
                         j++;
 
-                        if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+                        if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
                                 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
                                         return -EINVAL;
                                 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
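Note: si_dpm_vblank_too_short() above gates memory reclocking on the vblank period; only GDDR5 boards ever hit the 450 limit (presumably microseconds), and other VRAM types get 0, which disables the check entirely. A minimal sketch of the test:

#include <stdbool.h>
#include <stdint.h>

static bool vblank_too_short(uint32_t vblank_time, bool is_gddr5)
{
        /* the non-gddr5 limit is never hit, so it is disabled (0) */
        uint32_t switch_limit = is_gddr5 ? 450 : 0;

        return vblank_time < switch_limit;
}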
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -426,7 +426,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
         init_data.asic_id.pci_revision_id = adev->rev_id;
         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
 
-        init_data.asic_id.vram_width = adev->mc.vram_width;
+        init_data.asic_id.vram_width = adev->gmc.vram_width;
         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
         init_data.asic_id.atombios_base_address =
                 adev->mode_info.atom_context->bios;
@@ -1314,7 +1314,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
         /* indicate support of immediate flip */
         adev->ddev->mode_config.async_page_flip = true;
 
-        adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+        adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
         r = amdgpu_modeset_create_props(adev);
         if (r)