drm/nouveau/fifo/gf100-: use new interfaces for vmm operations
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit 01f349fcad
parent 8c967c5548
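The change converts the gf100/gk104 fifo code from the old nvkm_vm_get()/nvkm_vm_put() calls to the newer nvkm_vmm_get()/nvkm_vmm_put() interface and turns the embedded struct nvkm_vma members into pointers. As a reading aid, here is a minimal sketch of the resulting pattern, built only from calls visible in the hunks below; the helper names and the BAR1 VMM lookup in the map helper are assumptions for illustration, not code from this commit, and the snippet presumes the usual in-tree nvkm headers.

/*
 * Illustrative sketch only -- not code from this commit.  It condenses the
 * allocate/map/free pattern the hunks below convert to, using the calls that
 * appear in the diff (nvkm_vmm_get, nvkm_memory_map, nvkm_vmm_put,
 * nvkm_bar_bar1_vmm).  The helper names are invented, and the assumption
 * that "bar" in oneinit is the BAR1 VMM is taken from the destructor hunk.
 */
static int
example_user_vma_map(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
	int ret;

	/* New argument order: page shift (12) first, then the size in bytes.
	 * The old NV_MEM_ACCESS_RW flag is gone, which is presumably why the
	 * <subdev/fb.h> include can be dropped.  On success, fifo->user.bar
	 * points at a struct nvkm_vma instead of embedding one. */
	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
			   &fifo->user.bar);
	if (ret)
		return ret;

	/* The vma is now handed over as a pointer, not by address-of-member. */
	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}

static void
example_user_vma_put(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;

	/* Teardown pairs with the allocation above: hand the vma back to the
	 * owning VMM, as the gf100/gk104 destructor hunks do. */
	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
	nvkm_memory_unref(&fifo->user.mem);
}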
@@ -13,7 +13,7 @@ struct gf100_fifo_chan {
 
 	struct {
 		struct nvkm_gpuobj *inst;
-		struct nvkm_vma vma;
+		struct nvkm_vma *vma;
 	} engn[NVKM_SUBDEV_NR];
 };
 
@@ -14,7 +14,7 @@ struct gk104_fifo_chan {
 
 	struct {
 		struct nvkm_gpuobj *inst;
-		struct nvkm_vma vma;
+		struct nvkm_vma *vma;
 	} engn[NVKM_SUBDEV_NR];
 };
 
@@ -28,7 +28,6 @@
 #include <core/enum.h>
 #include <core/gpuobj.h>
 #include <subdev/bar.h>
-#include <subdev/fb.h>
 #include <engine/sw.h>
 
 #include <nvif/class.h>
@@ -586,12 +585,12 @@ gf100_fifo_oneinit(struct nvkm_fifo *base)
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_get(bar, nvkm_memory_size(fifo->user.mem), 12,
-			  NV_MEM_ACCESS_RW, &fifo->user.bar);
+	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
+			   &fifo->user.bar);
 	if (ret)
 		return ret;
 
-	return nvkm_memory_map(fifo->user.mem, 0, bar, &fifo->user.bar, NULL, 0);
+	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
 }
 
 static void
@@ -630,7 +629,7 @@ gf100_fifo_init(struct nvkm_fifo *base)
 	}
 
 	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
-	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
+	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
 
 	nvkm_wr32(device, 0x002100, 0xffffffff);
 	nvkm_wr32(device, 0x002140, 0x7fffffff);
@@ -641,7 +640,8 @@ static void *
 gf100_fifo_dtor(struct nvkm_fifo *base)
 {
 	struct gf100_fifo *fifo = gf100_fifo(base);
-	nvkm_vm_put(&fifo->user.bar);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
 	nvkm_memory_unref(&fifo->user.mem);
 	nvkm_memory_unref(&fifo->runlist.mem[0]);
 	nvkm_memory_unref(&fifo->runlist.mem[1]);
@@ -26,7 +26,7 @@ struct gf100_fifo {
 
 	struct {
 		struct nvkm_memory *mem;
-		struct nvkm_vma bar;
+		struct nvkm_vma *bar;
 	} user;
 };
 
@@ -27,7 +27,6 @@
 #include <core/client.h>
 #include <core/gpuobj.h>
 #include <subdev/bar.h>
-#include <subdev/fb.h>
 #include <subdev/timer.h>
 #include <subdev/top.h>
 #include <engine/sw.h>
@@ -836,12 +835,12 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_get(bar, nvkm_memory_size(fifo->user.mem), 12,
-			  NV_MEM_ACCESS_RW, &fifo->user.bar);
+	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
+			   &fifo->user.bar);
 	if (ret)
 		return ret;
 
-	return nvkm_memory_map(fifo->user.mem, 0, bar, &fifo->user.bar, NULL, 0);
+	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
 }
 
 static void
@@ -867,7 +866,7 @@ gk104_fifo_init(struct nvkm_fifo *base)
 		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
 	}
 
-	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
+	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
 
 	nvkm_wr32(device, 0x002100, 0xffffffff);
 	nvkm_wr32(device, 0x002140, 0x7fffffff);
@@ -877,9 +876,10 @@ static void *
 gk104_fifo_dtor(struct nvkm_fifo *base)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int i;
 
-	nvkm_vm_put(&fifo->user.bar);
+	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
 	nvkm_memory_unref(&fifo->user.mem);
 
 	for (i = 0; i < fifo->runlist_nr; i++) {
@@ -37,7 +37,7 @@ struct gk104_fifo {
 
 	struct {
 		struct nvkm_memory *mem;
-		struct nvkm_vma bar;
+		struct nvkm_vma *bar;
 	} user;
 };
 
@@ -111,7 +111,7 @@ gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
 	struct nvkm_gpuobj *inst = chan->base.inst;
 
 	if (offset) {
-		u64 addr = chan->engn[engine->subdev.index].vma.offset;
+		u64 addr = chan->engn[engine->subdev.index].vma->addr;
 		nvkm_kmap(inst);
 		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
 		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
@@ -126,11 +126,7 @@ gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
 			     struct nvkm_engine *engine)
 {
 	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
-	struct nvkm_vma *vma = &chan->engn[engine->subdev.index].vma;
-	if (vma->vm) {
-		nvkm_vm_unmap(vma);
-		nvkm_vm_put(vma);
-	}
+	nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
 	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
 }
 
@@ -150,13 +146,13 @@ gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_get(chan->base.vmm, chan->engn[engn].inst->size, 12,
-			  NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+	ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
+			   &chan->engn[engn].vma);
 	if (ret)
 		return ret;
 
 	return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
-			       &chan->engn[engn].vma, NULL, 0);
+			       chan->engn[engn].vma, NULL, 0);
 }
 
 static void
@@ -252,7 +248,7 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 				       (1ULL << NVKM_ENGINE_MSPPP) |
 				       (1ULL << NVKM_ENGINE_MSVLD) |
 				       (1ULL << NVKM_ENGINE_SW),
-				       1, fifo->user.bar.offset, 0x1000,
+				       1, fifo->user.bar->addr, 0x1000,
 				       oclass, &chan->base);
 	if (ret)
 		return ret;
@@ -117,7 +117,7 @@ gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
 	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
 
 	if (offset) {
-		u64 addr = chan->engn[engine->subdev.index].vma.offset;
+		u64 addr = chan->engn[engine->subdev.index].vma->addr;
 		u32 datalo = lower_32_bits(addr) | 0x00000004;
 		u32 datahi = upper_32_bits(addr);
 		nvkm_kmap(inst);
@@ -138,11 +138,7 @@ gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
-	struct nvkm_vma *vma = &chan->engn[engine->subdev.index].vma;
-	if (vma->vm) {
-		nvkm_vm_unmap(vma);
-		nvkm_vm_put(vma);
-	}
+	nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
 	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
 }
 
@@ -162,13 +158,13 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_get(chan->base.vmm, chan->engn[engn].inst->size, 12,
-			  NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+	ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
+			   &chan->engn[engn].vma);
 	if (ret)
 		return ret;
 
 	return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
-			       &chan->engn[engn].vma, NULL, 0);
+			       chan->engn[engn].vma, NULL, 0);
 }
 
 static void
@@ -291,7 +287,7 @@ gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
 
 	ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
 				  0x1000, 0x1000, true, vm, 0, subdevs,
-				  1, fifo->user.bar.offset, 0x200,
+				  1, fifo->user.bar->addr, 0x200,
 				  oclass, &chan->base);
 	if (ret)
 		return ret;