drm/amdgpu: separate bo_va structure
Split the bo_va structure into a generic vm_bo_base part and the bo_va specific part to allow other uses as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit ec681545af
parent 4ab4016aaf
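In short: the fields that describe the generic VM/BO association (the vm and bo pointers, the bo_list link on the BO and the vm_status link in the VM) move out of amdgpu_bo_va into a new struct amdgpu_vm_bo_base, which amdgpu_bo_va now embeds as its base member. A simplified sketch of the result, condensed from the header hunks below (comments trimmed, not a verbatim copy of the patch):

/* Simplified view of the layout after this patch. */
struct amdgpu_vm_bo_base {
	struct amdgpu_vm	*vm;		/* constant after initialization */
	struct amdgpu_bo	*bo;
	struct list_head	bo_list;	/* protected by bo being reserved */
	struct list_head	vm_status;	/* protected by spinlock */
};

struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;	/* generic part, shared with other users */

	struct dma_fence	*last_pt_update;
	unsigned		ref_count;
	struct list_head	invalids;	/* mappings for this bo_va */
	struct list_head	valids;
};

All call sites are updated accordingly: bo_va->bo becomes bo_va->base.bo, bo_va->vm becomes bo_va->base.vm, and so on.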
@@ -1487,7 +1487,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 		    addr > mapping->last)
 			continue;
 
-		*bo = lobj->bo_va->bo;
+		*bo = lobj->bo_va->base.bo;
 		return mapping;
 	}
 
@@ -1496,7 +1496,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 		    addr > mapping->last)
 			continue;
 
-		*bo = lobj->bo_va->bo;
+		*bo = lobj->bo_va->base.bo;
 		return mapping;
 	}
 }
@@ -621,7 +621,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
 	switch (args->operation) {
 	case AMDGPU_VA_OP_MAP:
-		r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
 					args->map_size);
 		if (r)
 			goto error_backoff;
@@ -641,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 					args->map_size);
 		break;
 	case AMDGPU_VA_OP_REPLACE:
-		r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
 					args->map_size);
 		if (r)
 			goto error_backoff;
@@ -33,6 +33,7 @@
 
 #define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
 
+/* bo virtual addresses in a vm */
 struct amdgpu_bo_va_mapping {
 	struct list_head		list;
 	struct rb_node			rb;
@@ -43,26 +44,19 @@ struct amdgpu_bo_va_mapping {
 	uint64_t			flags;
 };
 
-/* bo virtual addresses in a specific vm */
+/* User space allocated BO in a VM */
 struct amdgpu_bo_va {
+	struct amdgpu_vm_bo_base	base;
+
 	/* protected by bo being reserved */
-	struct list_head		bo_list;
 	struct dma_fence		*last_pt_update;
 	unsigned			ref_count;
 
-	/* protected by vm mutex and spinlock */
-	struct list_head		vm_status;
-
 	/* mappings for this bo_va */
 	struct list_head		invalids;
 	struct list_head		valids;
-
-	/* constant after initialization */
-	struct amdgpu_vm		*vm;
-	struct amdgpu_bo		*bo;
 };
 
 
 struct amdgpu_bo {
 	/* Protected by tbo.reserved */
 	u32				preferred_domains;
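For readers scanning the rest of the diff, the mechanical part of the change is just this access-pattern shift; a hedged illustration (the helper name is made up for this note and is not part of the patch):

/* Illustration only: every former direct field access now goes through base. */
static inline struct amdgpu_bo *bo_va_bo(struct amdgpu_bo_va *bo_va)
{
	return bo_va->base.bo;	/* was: bo_va->bo before this patch */
}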
@@ -284,7 +284,7 @@ TRACE_EVENT(amdgpu_vm_bo_map,
 	    ),
 
 	    TP_fast_assign(
-			   __entry->bo = bo_va ? bo_va->bo : NULL;
+			   __entry->bo = bo_va ? bo_va->base.bo : NULL;
 			   __entry->start = mapping->start;
 			   __entry->last = mapping->last;
 			   __entry->offset = mapping->offset;
@@ -308,7 +308,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
 	    ),
 
 	    TP_fast_assign(
-			   __entry->bo = bo_va->bo;
+			   __entry->bo = bo_va->base.bo;
 			   __entry->start = mapping->start;
 			   __entry->last = mapping->last;
 			   __entry->offset = mapping->offset;
@@ -76,7 +76,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return -ENOMEM;
 	}
 
-	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->vm, AMDGPU_CSA_VADDR,
+	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
 				AMDGPU_CSA_SIZE);
 	if (r) {
 		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
@@ -870,8 +870,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 {
 	struct amdgpu_bo_va *bo_va;
 
-	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		if (bo_va->vm == vm) {
+	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
+		if (bo_va->base.vm == vm) {
 			return bo_va;
 		}
 	}
@@ -1726,7 +1726,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
 			bool clear)
 {
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_bo *bo = bo_va->base.bo;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 	struct amdgpu_bo_va_mapping *mapping;
 	dma_addr_t *pages_addr = NULL;
 	uint64_t gtt_flags, flags;
@@ -1735,27 +1736,27 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	struct dma_fence *exclusive;
 	int r;
 
-	if (clear || !bo_va->bo) {
+	if (clear || !bo_va->base.bo) {
 		mem = NULL;
 		nodes = NULL;
 		exclusive = NULL;
 	} else {
 		struct ttm_dma_tt *ttm;
 
-		mem = &bo_va->bo->tbo.mem;
+		mem = &bo_va->base.bo->tbo.mem;
 		nodes = mem->mm_node;
 		if (mem->mem_type == TTM_PL_TT) {
-			ttm = container_of(bo_va->bo->tbo.ttm, struct
-					   ttm_dma_tt, ttm);
+			ttm = container_of(bo_va->base.bo->tbo.ttm,
+					   struct ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
 		}
-		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
+		exclusive = reservation_object_get_excl(bo->tbo.resv);
 	}
 
-	if (bo_va->bo) {
-		flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-		gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
-			     adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
+	if (bo) {
+		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+		gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) &&
+			     adev == amdgpu_ttm_adev(bo->tbo.bdev)) ?
 			    flags : 0;
 	} else {
 		flags = 0x0;
@@ -1763,7 +1764,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	}
 
 	spin_lock(&vm->status_lock);
-	if (!list_empty(&bo_va->vm_status))
+	if (!list_empty(&bo_va->base.vm_status))
 		list_splice_init(&bo_va->valids, &bo_va->invalids);
 	spin_unlock(&vm->status_lock);
 
@@ -1786,9 +1787,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
 	spin_lock(&vm->status_lock);
 	list_splice_init(&bo_va->invalids, &bo_va->valids);
-	list_del_init(&bo_va->vm_status);
+	list_del_init(&bo_va->base.vm_status);
 	if (clear)
-		list_add(&bo_va->vm_status, &vm->cleared);
+		list_add(&bo_va->base.vm_status, &vm->cleared);
 	spin_unlock(&vm->status_lock);
 
 	if (vm->use_cpu_for_update) {
@@ -2001,7 +2002,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
 	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->invalidated)) {
 		bo_va = list_first_entry(&vm->invalidated,
-			struct amdgpu_bo_va, vm_status);
+			struct amdgpu_bo_va, base.vm_status);
 		spin_unlock(&vm->status_lock);
 
 		r = amdgpu_vm_bo_update(adev, bo_va, true);
@@ -2041,16 +2042,17 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	if (bo_va == NULL) {
 		return NULL;
 	}
-	bo_va->vm = vm;
-	bo_va->bo = bo;
+	bo_va->base.vm = vm;
+	bo_va->base.bo = bo;
+	INIT_LIST_HEAD(&bo_va->base.bo_list);
+	INIT_LIST_HEAD(&bo_va->base.vm_status);
+
 	bo_va->ref_count = 1;
-	INIT_LIST_HEAD(&bo_va->bo_list);
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
-	INIT_LIST_HEAD(&bo_va->vm_status);
 
 	if (bo)
-		list_add_tail(&bo_va->bo_list, &bo->va);
+		list_add_tail(&bo_va->base.bo_list, &bo->va);
 
 	return bo_va;
 }
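The amdgpu_vm_bo_add hunk above now initializes the embedded base and links it into the BO's va list. A minimal sketch of that wiring as a standalone helper, assuming the structures from this patch (the helper itself is hypothetical and not introduced here):

/* Hypothetical helper, illustration only: set up the generic VM/BO
 * association exactly as amdgpu_vm_bo_add does after this patch.
 */
static void vm_bo_base_init(struct amdgpu_vm_bo_base *base,
			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
	base->vm = vm;
	base->bo = bo;
	INIT_LIST_HEAD(&base->bo_list);
	INIT_LIST_HEAD(&base->vm_status);

	if (bo)
		list_add_tail(&base->bo_list, &bo->va);
}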
@@ -2075,7 +2077,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		     uint64_t size, uint64_t flags)
 {
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_bo *bo = bo_va->base.bo;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 	uint64_t eaddr;
 
 	/* validate the parameters */
@@ -2086,7 +2089,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	/* make sure object fit at this offset */
 	eaddr = saddr + size - 1;
 	if (saddr >= eaddr ||
-	    (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+	    (bo && offset + size > amdgpu_bo_size(bo)))
 		return -EINVAL;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2096,7 +2099,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	if (tmp) {
 		/* bo and tmp overlap, invalid addr */
 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
-			"0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
 			tmp->start, tmp->last + 1);
 		return -EINVAL;
 	}
@@ -2141,7 +2144,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 			     uint64_t size, uint64_t flags)
 {
 	struct amdgpu_bo_va_mapping *mapping;
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_bo *bo = bo_va->base.bo;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 	uint64_t eaddr;
 	int r;
 
@@ -2153,7 +2157,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 	/* make sure object fit at this offset */
 	eaddr = saddr + size - 1;
 	if (saddr >= eaddr ||
-	    (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+	    (bo && offset + size > amdgpu_bo_size(bo)))
 		return -EINVAL;
 
 	/* Allocate all the needed memory */
@@ -2161,7 +2165,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 	if (!mapping)
 		return -ENOMEM;
 
-	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
+	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
 	if (r) {
 		kfree(mapping);
 		return r;
@@ -2201,7 +2205,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		       uint64_t saddr)
 {
 	struct amdgpu_bo_va_mapping *mapping;
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 	bool valid = true;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2349,12 +2353,12 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va)
 {
 	struct amdgpu_bo_va_mapping *mapping, *next;
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 
-	list_del(&bo_va->bo_list);
+	list_del(&bo_va->base.bo_list);
 
 	spin_lock(&vm->status_lock);
-	list_del(&bo_va->vm_status);
+	list_del(&bo_va->base.vm_status);
 	spin_unlock(&vm->status_lock);
 
 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
@@ -2386,13 +2390,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			     struct amdgpu_bo *bo)
 {
-	struct amdgpu_bo_va *bo_va;
+	struct amdgpu_vm_bo_base *bo_base;
 
-	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		spin_lock(&bo_va->vm->status_lock);
-		if (list_empty(&bo_va->vm_status))
-			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-		spin_unlock(&bo_va->vm->status_lock);
+	list_for_each_entry(bo_base, &bo->va, bo_list) {
+		spin_lock(&bo_base->vm->status_lock);
+		if (list_empty(&bo_base->vm_status))
+			list_add(&bo_base->vm_status,
+				 &bo_base->vm->invalidated);
+		spin_unlock(&bo_base->vm->status_lock);
 	}
 }
 
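With the change above, amdgpu_vm_bo_invalidate walks plain amdgpu_vm_bo_base entries on bo->va instead of bo_va structures; it only needs the generic fields, so it never has to know which containing structure an entry belongs to. Code that does need the full bo_va back can use the usual container_of() pattern; a hedged sketch, not code from this patch:

/* Illustration only: recover the containing bo_va from a base entry,
 * valid for entries that really live inside an amdgpu_bo_va.
 */
static struct amdgpu_bo_va *base_to_bo_va(struct amdgpu_vm_bo_base *bo_base)
{
	return container_of(bo_base, struct amdgpu_bo_va, base);
}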
@@ -99,6 +99,18 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
 #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
 
+/* base structure for tracking BO usage in a VM */
+struct amdgpu_vm_bo_base {
+	/* constant after initialization */
+	struct amdgpu_vm		*vm;
+	struct amdgpu_bo		*bo;
+
+	/* protected by bo being reserved */
+	struct list_head		bo_list;
+
+	/* protected by spinlock */
+	struct list_head		vm_status;
+};
+
 struct amdgpu_vm_pt {
 	struct amdgpu_bo	*bo;