mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-18 16:44:27 +08:00
drm/amdgpu: add spin lock to protect freed list in vm (v2)
There is a protection fault involving the freed list during OCL testing; add a spin lock to protect it. v2: drop the changes in vm_fini. Signed-off-by: JimQu <jim.qu@amd.com> Reviewed-by: Christian König <christian.koenig@amd.com>
This commit is contained in:
parent
eb64526f5a
commit
9c4153b1ee
@@ -955,6 +955,8 @@ struct amdgpu_vm {
|
|||||||
struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
|
struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
|
||||||
/* for interval tree */
|
/* for interval tree */
|
||||||
spinlock_t it_lock;
|
spinlock_t it_lock;
|
||||||
|
/* protecting freed */
|
||||||
|
spinlock_t freed_lock;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct amdgpu_vm_manager {
|
struct amdgpu_vm_manager {
|
||||||
|
@@ -885,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
|
|||||||
struct amdgpu_bo_va_mapping *mapping;
|
struct amdgpu_bo_va_mapping *mapping;
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
|
spin_lock(&vm->freed_lock);
|
||||||
while (!list_empty(&vm->freed)) {
|
while (!list_empty(&vm->freed)) {
|
||||||
mapping = list_first_entry(&vm->freed,
|
mapping = list_first_entry(&vm->freed,
|
||||||
struct amdgpu_bo_va_mapping, list);
|
struct amdgpu_bo_va_mapping, list);
|
||||||
list_del(&mapping->list);
|
list_del(&mapping->list);
|
||||||
|
spin_unlock(&vm->freed_lock);
|
||||||
r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
|
r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
|
||||||
kfree(mapping);
|
kfree(mapping);
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
|
spin_lock(&vm->freed_lock);
|
||||||
}
|
}
|
||||||
|
spin_unlock(&vm->freed_lock);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -1150,10 +1154,13 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
|
|||||||
spin_unlock(&vm->it_lock);
|
spin_unlock(&vm->it_lock);
|
||||||
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
|
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
|
||||||
|
|
||||||
if (valid)
|
if (valid) {
|
||||||
|
spin_lock(&vm->freed_lock);
|
||||||
list_add(&mapping->list, &vm->freed);
|
list_add(&mapping->list, &vm->freed);
|
||||||
else
|
spin_unlock(&vm->freed_lock);
|
||||||
|
} else {
|
||||||
kfree(mapping);
|
kfree(mapping);
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@@ -1186,7 +1193,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
|
|||||||
interval_tree_remove(&mapping->it, &vm->va);
|
interval_tree_remove(&mapping->it, &vm->va);
|
||||||
spin_unlock(&vm->it_lock);
|
spin_unlock(&vm->it_lock);
|
||||||
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
|
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
|
||||||
|
spin_lock(&vm->freed_lock);
|
||||||
list_add(&mapping->list, &vm->freed);
|
list_add(&mapping->list, &vm->freed);
|
||||||
|
spin_unlock(&vm->freed_lock);
|
||||||
}
|
}
|
||||||
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
|
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
|
||||||
list_del(&mapping->list);
|
list_del(&mapping->list);
|
||||||
@@ -1247,6 +1256,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
|||||||
INIT_LIST_HEAD(&vm->cleared);
|
INIT_LIST_HEAD(&vm->cleared);
|
||||||
INIT_LIST_HEAD(&vm->freed);
|
INIT_LIST_HEAD(&vm->freed);
|
||||||
spin_lock_init(&vm->it_lock);
|
spin_lock_init(&vm->it_lock);
|
||||||
|
spin_lock_init(&vm->freed_lock);
|
||||||
pd_size = amdgpu_vm_directory_size(adev);
|
pd_size = amdgpu_vm_directory_size(adev);
|
||||||
pd_entries = amdgpu_vm_num_pdes(adev);
|
pd_entries = amdgpu_vm_num_pdes(adev);
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user