drm/nouveau/vm: reduce number of entry-points to vm_map()
Pretty much every caller had to decide which mapping function to use, so it makes more sense to have the one entry point decide the path to take instead.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit 2e2cfbe61b
parent 2510538fa0
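The resulting single entry point, nouveau_vm_map(), inspects the nouveau_mem node and picks the mapping path itself. As orientation before the hunks below, here is its body from the base.c hunk, lightly reformatted and with explanatory comments added (note node->size is kept in 4 KiB units, hence the << 12 conversion to bytes):

	void
	nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
	{
		if (node->sg)           /* scatter/gather table (e.g. dma-buf import) */
			nouveau_vm_map_sg_table(vma, 0, node->size << 12, node);
		else if (node->pages)   /* DMA address array filled in by the sgdma backend */
			nouveau_vm_map_sg(vma, 0, node->size << 12, node);
		else                    /* contiguous VRAM allocation */
			nouveau_vm_map_at(vma, 0, node);
	}

With the decision centralised here, callers such as nouveau_vma_getmap() and nv04_sgdma_bind() reduce to a single nouveau_vm_map() call, and nouveau_vm_map_sg()/nouveau_vm_map_sg_table() become static.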
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -131,9 +131,5 @@ void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
 void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
 void nouveau_vm_unmap(struct nouveau_vma *);
 void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
-void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
-		       struct nouveau_mem *);
-void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
-			     struct nouveau_mem *mem);
 
 #endif
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -72,13 +72,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 	vmm->flush(vm);
 }
 
-void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
-{
-	nouveau_vm_map_at(vma, 0, node);
-}
-
-void
+static void
 nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 			struct nouveau_mem *mem)
 {
@@ -136,7 +130,7 @@ finish:
 	vmm->flush(vm);
 }
 
-void
+static void
 nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 		  struct nouveau_mem *mem)
 {
@@ -174,6 +168,18 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 	vmm->flush(vm);
 }
 
+void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
+{
+	if (node->sg)
+		nouveau_vm_map_sg_table(vma, 0, node->size << 12, node);
+	else
+	if (node->pages)
+		nouveau_vm_map_sg(vma, 0, node->size << 12, node);
+	else
+		nouveau_vm_map_at(vma, 0, node);
+}
+
 void
 nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 {
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -965,11 +965,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
 	if (ret)
 		return ret;
 
-	if (mem->mem_type == TTM_PL_VRAM)
-		nouveau_vm_map(vma, node);
-	else
-		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
-
+	nouveau_vm_map(vma, node);
 	return 0;
 }
 
@@ -1147,19 +1143,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 		return;
 
 	list_for_each_entry(vma, &nvbo->vma_list, head) {
-		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
+		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
+		    (new_mem->mem_type == TTM_PL_VRAM ||
+		     nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
 			nouveau_vm_map(vma, new_mem->mm_node);
-		} else
-		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
-		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
-			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
-				nouveau_vm_map_sg_table(vma, 0, new_mem->
-						  num_pages << PAGE_SHIFT,
-						  new_mem->mm_node);
-			else
-				nouveau_vm_map_sg(vma, 0, new_mem->
-						  num_pages << PAGE_SHIFT,
-						  new_mem->mm_node);
 		} else {
 			nouveau_vm_unmap(vma);
 		}
@@ -1535,7 +1522,6 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
 		   struct nouveau_vma *vma)
 {
 	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
 	int ret;
 
 	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
@@ -1543,15 +1529,10 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
 	if (ret)
 		return ret;
 
-	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+	if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
+	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
 		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
-	else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
-		 nvbo->page_shift == vma->vm->vmm->spg_shift) {
-		if (node->sg)
-			nouveau_vm_map_sg_table(vma, 0, size, node);
-		else
-			nouveau_vm_map_sg(vma, 0, size, node);
-	}
 
 	list_add_tail(&vma->head, &nvbo->vma_list);
 	vma->refcount = 1;
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -31,16 +31,17 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct nouveau_mem *node = mem->mm_node;
-	u64 size = mem->num_pages << 12;
 
 	if (ttm->sg) {
-		node->sg = ttm->sg;
-		nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
+		node->sg    = ttm->sg;
+		node->pages = NULL;
 	} else {
+		node->sg    = NULL;
 		node->pages = nvbe->ttm.dma_address;
-		nouveau_vm_map_sg(&node->vma[0], 0, size, node);
 	}
+	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
 
+	nouveau_vm_map(&node->vma[0], node);
 	nvbe->node = node;
 	return 0;
 }
@@ -67,9 +68,13 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 
 	/* noop: bound in move_notify() */
 	if (ttm->sg) {
-		node->sg = ttm->sg;
-	} else
+		node->sg    = ttm->sg;
+		node->pages = NULL;
+	} else {
+		node->sg    = NULL;
 		node->pages = nvbe->ttm.dma_address;
+	}
+	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
 	return 0;
 }
 
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -171,6 +171,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 	node = kzalloc(sizeof(*node), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
 
+	node->page_shift = 12;
+
 	switch (nv_device(drm->device)->card_type) {