drm/amdgpu: split VMID management by VMHUB

This way GFX and MM won't fight for VMIDs any more.

Initially disabled, since we also need to stop flushing all hubs
at the same time.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Andres Rodriguez <andresx7@gmail.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 7645670dec (parent 4f618e737f)
Author: Christian König <christian.koenig@amd.com>, 2017-04-06 17:52:39 +02:00
Committed by: Alex Deucher
10 changed files with 69 additions and 45 deletions
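
The core idea, reduced to a minimal stand-alone C sketch before diving into the hunks: instead of one globally locked VMID pool, the VM manager now owns an array of independent per-hub pools, and every ring indexes into the pool of the hub it belongs to. All types and names below are illustrative stand-ins, not the driver's real definitions.

#include <assert.h>

/* Illustrative constants; the real values come from the amdgpu headers. */
#define MAX_VMHUBS 2            /* e.g. AMDGPU_GFXHUB and AMDGPU_MMHUB on Vega */
#define NUM_VM     16

/* Stand-in for struct amdgpu_vm_id: per-VMID bookkeeping. */
struct sketch_vm_id {
        int reserved;
};

/* Stand-in for struct amdgpu_vm_id_manager: each hub now carries its own
 * id array (and, in the real driver, its own lock and LRU list). */
struct sketch_id_manager {
        unsigned num_ids;
        struct sketch_vm_id ids[NUM_VM];
};

/* Stand-in for struct amdgpu_vm_manager after the split. */
struct sketch_vm_manager {
        struct sketch_id_manager id_mgr[MAX_VMHUBS];
};

/* A ring belongs to exactly one hub, so GFX and MM rings resolve to
 * different pools and never contend for the same VMIDs or the same lock. */
static struct sketch_id_manager *
sketch_ring_id_mgr(struct sketch_vm_manager *mgr, unsigned vmhub)
{
        return &mgr->id_mgr[vmhub];
}

int main(void)
{
        static struct sketch_vm_manager mgr;

        /* A GFX ring (hub 0) and an MM ring (hub 1) get disjoint pools. */
        assert(sketch_ring_id_mgr(&mgr, 0) != sketch_ring_id_mgr(&mgr, 1));
        return 0;
}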

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -1854,7 +1854,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
-	mutex_init(&adev->vm_manager.lock);
 	atomic_set(&adev->irq.ih.lock, 0);
 	mutex_init(&adev->firmware.mutex);
 	mutex_init(&adev->pm.mutex);

drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -217,7 +217,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (r) {
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
 		if (job && job->vm_id)
-			amdgpu_vm_reset_id(adev, job->vm_id);
+			amdgpu_vm_reset_id(adev, ring->funcs->vmhub,
+					   job->vm_id);
 		amdgpu_ring_undo(ring);
 		return r;
 	}

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -406,6 +406,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_job *job)
 {
 	struct amdgpu_device *adev = ring->adev;
+	/* Temporary use only the first VM manager */
+	unsigned vmhub = 0; /*ring->funcs->vmhub;*/
+	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	uint64_t fence_context = adev->fence_context + ring->idx;
 	struct dma_fence *updates = sync->last_vm_update;
 	struct amdgpu_vm_id *id, *idle;
@@ -413,16 +416,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	unsigned i;
 	int r = 0;
 
-	fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
-			       GFP_KERNEL);
+	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
 	if (!fences)
 		return -ENOMEM;
 
-	mutex_lock(&adev->vm_manager.lock);
+	mutex_lock(&id_mgr->lock);
 
 	/* Check if we have an idle VMID */
 	i = 0;
-	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
+	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
 		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
 		if (!fences[i])
 			break;
@@ -430,7 +432,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	}
 
 	/* If we can't find a idle VMID to use, wait till one becomes available */
-	if (&idle->list == &adev->vm_manager.ids_lru) {
+	if (&idle->list == &id_mgr->ids_lru) {
 		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
 		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
 		struct dma_fence_array *array;
@@ -455,7 +457,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (r)
 			goto error;
 
-		mutex_unlock(&adev->vm_manager.lock);
+		mutex_unlock(&id_mgr->lock);
 		return 0;
 	}
@@ -463,7 +465,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	job->vm_needs_flush = true;
 
 	/* Check if we can use a VMID already assigned to this VM */
-	list_for_each_entry_reverse(id, &adev->vm_manager.ids_lru, list) {
+	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
 		struct dma_fence *flushed;
 
 		/* Check all the prerequisites to using this VMID */
@@ -495,13 +497,13 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (r)
 			goto error;
 
-		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+		list_move_tail(&id->list, &id_mgr->ids_lru);
 
-		job->vm_id = id - adev->vm_manager.ids;
+		job->vm_id = id - id_mgr->ids;
 		job->vm_needs_flush = false;
 		trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 
-		mutex_unlock(&adev->vm_manager.lock);
+		mutex_unlock(&id_mgr->lock);
 		return 0;
 	};
@@ -522,14 +524,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	id->pd_gpu_addr = job->vm_pd_addr;
 	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
-	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+	list_move_tail(&id->list, &id_mgr->ids_lru);
 	atomic64_set(&id->owner, vm->client_id);
 
-	job->vm_id = id - adev->vm_manager.ids;
+	job->vm_id = id - id_mgr->ids;
 	trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 
 error:
-	mutex_unlock(&adev->vm_manager.lock);
+	mutex_unlock(&id_mgr->lock);
 	return r;
 }
@@ -581,7 +583,9 @@ static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
+	unsigned vmhub = ring->funcs->vmhub;
+	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
 		id->gds_base != job->gds_base ||
 		id->gds_size != job->gds_size ||
@@ -619,10 +623,10 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 		if (r)
 			return r;
 
-		mutex_lock(&adev->vm_manager.lock);
+		mutex_lock(&id_mgr->lock);
 		dma_fence_put(id->last_flush);
 		id->last_flush = fence;
-		mutex_unlock(&adev->vm_manager.lock);
+		mutex_unlock(&id_mgr->lock);
 	}
 
 	if (gds_switch_needed) {
@@ -657,9 +661,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
  *
  * Reset saved GDW, GWS and OA to force switch on next flush.
  */
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
+			unsigned vmid)
 {
-	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
+	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
 
 	id->gds_base = 0;
 	id->gds_size = 0;
@@ -2230,16 +2236,21 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  */
 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 {
-	unsigned i;
+	unsigned i, j;
 
-	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+		struct amdgpu_vm_id_manager *id_mgr =
+			&adev->vm_manager.id_mgr[i];
 
-	/* skip over VMID 0, since it is the system VM */
-	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
-		amdgpu_vm_reset_id(adev, i);
-		amdgpu_sync_create(&adev->vm_manager.ids[i].active);
-		list_add_tail(&adev->vm_manager.ids[i].list,
-			      &adev->vm_manager.ids_lru);
+		mutex_init(&id_mgr->lock);
+		INIT_LIST_HEAD(&id_mgr->ids_lru);
+
+		/* skip over VMID 0, since it is the system VM */
+		for (j = 1; j < id_mgr->num_ids; ++j) {
+			amdgpu_vm_reset_id(adev, i, j);
+			amdgpu_sync_create(&id_mgr->ids[j].active);
+			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
+		}
 	}
 
 	adev->vm_manager.fence_context =
@@ -2247,6 +2258,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		adev->vm_manager.seqno[i] = 0;
 
 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
 	atomic64_set(&adev->vm_manager.client_counter, 0);
 	spin_lock_init(&adev->vm_manager.prt_lock);
@@ -2262,13 +2274,19 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
  */
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 {
-	unsigned i;
+	unsigned i, j;
 
-	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
-		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+		struct amdgpu_vm_id_manager *id_mgr =
+			&adev->vm_manager.id_mgr[i];
 
-		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
-		dma_fence_put(id->flushed_updates);
-		dma_fence_put(id->last_flush);
+		mutex_destroy(&id_mgr->lock);
+		for (j = 0; j < AMDGPU_NUM_VM; ++j) {
+			struct amdgpu_vm_id *id = &id_mgr->ids[j];
+
+			amdgpu_sync_free(&id->active);
+			dma_fence_put(id->flushed_updates);
+			dma_fence_put(id->last_flush);
+		}
 	}
 }
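
The two hunks above change the iteration shape from one flat loop over IDs to a nested loop: hubs first, then IDs within each hub, with each hub's lock created and destroyed alongside its pool. A compilable miniature of that lifecycle, with all names hypothetical stand-ins for the driver's:

#include <stdio.h>

#define HUBS 2  /* stand-in for AMDGPU_MAX_VMHUBS */
#define IDS  8  /* stand-in for the per-hub num_ids */

struct mini_pool {
        int lock_ready;         /* stand-in for the per-hub mutex */
        int ids[IDS];
};

static void mini_init(struct mini_pool p[HUBS])
{
        for (int i = 0; i < HUBS; ++i) {
                p[i].lock_ready = 1;            /* mutex_init() analogue */
                for (int j = 1; j < IDS; ++j)   /* VMID 0 is the system VM */
                        p[i].ids[j] = 0;        /* amdgpu_vm_reset_id() analogue */
        }
}

static void mini_fini(struct mini_pool p[HUBS])
{
        for (int i = 0; i < HUBS; ++i)
                p[i].lock_ready = 0;            /* mutex_destroy() analogue */
}

int main(void)
{
        struct mini_pool pools[HUBS];

        mini_init(pools);
        mini_fini(pools);
        printf("initialized and tore down %d per-hub pools\n", HUBS);
        return 0;
}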

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -146,12 +146,16 @@ struct amdgpu_vm_id {
 	uint32_t		oa_size;
 };
 
+struct amdgpu_vm_id_manager {
+	struct mutex		lock;
+	unsigned		num_ids;
+	struct list_head	ids_lru;
+	struct amdgpu_vm_id	ids[AMDGPU_NUM_VM];
+};
+
 struct amdgpu_vm_manager {
 	/* Handling of VMIDs */
-	struct mutex				lock;
-	unsigned				num_ids;
-	struct list_head			ids_lru;
-	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];
+	struct amdgpu_vm_id_manager		id_mgr[AMDGPU_MAX_VMHUBS];
 
 	/* Handling of VM fences */
 	u64					fence_context;
@@ -197,7 +201,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync, struct dma_fence *fence,
 		      struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
+			unsigned vmid);
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 				 struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
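
One consequence of the new layout is that job->vm_id stays a small per-hub index: the amdgpu_vm.c hunks compute it by pointer arithmetic against the hub's own array (id - id_mgr->ids), so the same numeric VMID can be in flight on both hubs at once. A quick stand-alone check of that property, using miniature types rather than the driver's:

#include <assert.h>
#include <stdio.h>

struct mini_id { int dummy; };
struct mini_id_mgr { struct mini_id ids[16]; };

int main(void)
{
        static struct mini_id_mgr gfx_hub, mm_hub;
        struct mini_id *gfx = &gfx_hub.ids[3];
        struct mini_id *mm = &mm_hub.ids[3];

        /* Same numeric VMID, resolved against different per-hub pools. */
        assert(gfx - gfx_hub.ids == 3);
        assert(mm - mm_hub.ids == 3);
        printf("VMID 3 lives independently on both hubs\n");
        return 0;
}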

drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -1935,7 +1935,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
 				   INDEX_STRIDE, 3);
 
 	mutex_lock(&adev->srbm_mutex);
-	for (i = 0; i < adev->vm_manager.num_ids; i++) {
+	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
 		if (i == 0)
 			sh_mem_base = 0;
 		else

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -3890,7 +3890,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
 	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
 				   INDEX_STRIDE, 3);
 	mutex_lock(&adev->srbm_mutex);
-	for (i = 0; i < adev->vm_manager.num_ids; i++) {
+	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
 		vi_srbm_select(adev, 0, 0, 0, i);
 		/* CP and shaders */
 		if (i == 0) {

drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c

@@ -621,7 +621,7 @@ static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 */
-	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
 	adev->vm_manager.num_level = 1;
 	amdgpu_vm_manager_init(adev);

drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -746,7 +746,7 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 */
-	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
 	adev->vm_manager.num_level = 1;
 	amdgpu_vm_manager_init(adev);

drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

@@ -949,7 +949,7 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 */
-	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
 	adev->vm_manager.num_level = 1;
 	amdgpu_vm_manager_init(adev);

drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

@@ -519,7 +519,8 @@ static int gmc_v9_0_vm_init(struct amdgpu_device *adev)
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 */
-	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
 
 	/* TODO: fix num_level for APU when updating vm size and block size */
 	if (adev->flags & AMD_IS_APU)
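
gmc v9 is the one IP block with two hubs, so it sizes each pool independently instead of sharing one. Assuming AMDGPU_NUM_OF_VMIDS is 8 (its value in amdgpu.h at the time, with VMIDs 8-15 left to amdkfd), a quick sanity check of the usable capacity once the split is enabled:

#include <stdio.h>

#define AMDGPU_NUM_OF_VMIDS 8   /* assumption: value from amdgpu.h */
#define NUM_HUBS 2              /* AMDGPU_GFXHUB + AMDGPU_MMHUB */

int main(void)
{
        /* VMID 0 is the system VM and is skipped by the allocator. */
        int usable_per_hub = AMDGPU_NUM_OF_VMIDS - 1;

        printf("usable VMIDs: %d per hub, %d across both hubs\n",
               usable_per_hub, usable_per_hub * NUM_HUBS);
        return 0;
}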