drm/nouveau/dma: audit and version NV_DMA classes
The full object interfaces are about to be exposed to userspace, so we need to check for any security-related issues and version the structs to make it easier to handle any changes we may need in the future.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit 4acfd707e2
parent b2c817031b
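For orientation before the diff: below is a minimal sketch, not part of the patch, of how a kernel-side caller now describes a context DMA object with the new versioned struct nv_dma_v0 instead of OR'ing NV_DMA_TARGET_* / NV_DMA_ACCESS_* into the old nv_dma_class.flags word. The struct, field and class names come from the hunks that follow; the function name and its parameters are placeholders, and the usual kernel-internal headers/handles of a real caller are assumed.

#include <nvif/class.h>

/* sketch: create a context DMA object with the versioned argument struct */
static int
example_ctxdma_new(struct nvif_object *parent, u32 handle, u64 limit,
		   struct nvif_object *ctxdma)
{
	struct nv_dma_v0 args = {};	/* zero-initialised => .version = 0 */

	args.target = NV_DMA_V0_TARGET_VM;	/* was NV_DMA_TARGET_VM in .flags */
	args.access = NV_DMA_V0_ACCESS_RDWR;	/* was NV_DMA_ACCESS_RDWR in .flags */
	args.start = 0;
	args.limit = limit;

	/* the object class is now the bare NV_DMA_* number, not NV_DMA_*_CLASS */
	return nvif_object_init(parent, NULL, handle, NV_DMA_IN_MEMORY,
				&args, sizeof(args), ctxdma);
}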
@@ -23,9 +23,12 @@
  */
 
 #include <core/object.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
 
 #include <subdev/fb.h>
+#include <subdev/instmem.h>
 
 #include "priv.h"
 
@@ -57,57 +60,87 @@ nvkm_dmaobj_create_(struct nouveau_object *parent,
 		    struct nouveau_oclass *oclass, void **pdata, u32 *psize,
 		    int length, void **pobject)
 {
-	struct nv_dma_class *args = *pdata;
+	union {
+		struct nv_dma_v0 v0;
+	} *args = *pdata;
+	struct nouveau_instmem *instmem = nouveau_instmem(parent);
+	struct nouveau_client *client = nouveau_client(parent);
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_fb *pfb = nouveau_fb(parent);
 	struct nouveau_dmaobj *dmaobj;
+	void *data = *pdata;
+	u32 size = *psize;
 	int ret;
 
-	if (*psize < sizeof(*args))
-		return -EINVAL;
-	*pdata = &args->conf0;
-
 	ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
 	dmaobj = *pobject;
 	if (ret)
 		return ret;
 
-	switch (args->flags & NV_DMA_TARGET_MASK) {
-	case NV_DMA_TARGET_VM:
+	nv_ioctl(parent, "create dma size %d\n", *psize);
+	if (nvif_unpack(args->v0, 0, 0, true)) {
+		nv_ioctl(parent, "create dma vers %d target %d access %d "
+				 "start %016llx limit %016llx\n",
+			 args->v0.version, args->v0.target, args->v0.access,
+			 args->v0.start, args->v0.limit);
+		dmaobj->target = args->v0.target;
+		dmaobj->access = args->v0.access;
+		dmaobj->start = args->v0.start;
+		dmaobj->limit = args->v0.limit;
+	} else
+		return ret;
+
+	*pdata = data;
+	*psize = size;
+
+	if (dmaobj->start > dmaobj->limit)
+		return -EINVAL;
+
+	switch (dmaobj->target) {
+	case NV_DMA_V0_TARGET_VM:
 		dmaobj->target = NV_MEM_TARGET_VM;
 		break;
-	case NV_DMA_TARGET_VRAM:
+	case NV_DMA_V0_TARGET_VRAM:
+		if (!client->super) {
+			if (dmaobj->limit >= pfb->ram->size - instmem->reserved)
+				return -EACCES;
+			if (device->card_type >= NV_50)
+				return -EACCES;
+		}
 		dmaobj->target = NV_MEM_TARGET_VRAM;
 		break;
-	case NV_DMA_TARGET_PCI:
+	case NV_DMA_V0_TARGET_PCI:
+		if (!client->super)
+			return -EACCES;
 		dmaobj->target = NV_MEM_TARGET_PCI;
 		break;
-	case NV_DMA_TARGET_PCI_US:
-	case NV_DMA_TARGET_AGP:
+	case NV_DMA_V0_TARGET_PCI_US:
+	case NV_DMA_V0_TARGET_AGP:
+		if (!client->super)
+			return -EACCES;
 		dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	switch (args->flags & NV_DMA_ACCESS_MASK) {
-	case NV_DMA_ACCESS_VM:
+	switch (dmaobj->access) {
+	case NV_DMA_V0_ACCESS_VM:
 		dmaobj->access = NV_MEM_ACCESS_VM;
 		break;
-	case NV_DMA_ACCESS_RD:
+	case NV_DMA_V0_ACCESS_RD:
 		dmaobj->access = NV_MEM_ACCESS_RO;
 		break;
-	case NV_DMA_ACCESS_WR:
+	case NV_DMA_V0_ACCESS_WR:
 		dmaobj->access = NV_MEM_ACCESS_WO;
 		break;
-	case NV_DMA_ACCESS_RDWR:
+	case NV_DMA_V0_ACCESS_RDWR:
 		dmaobj->access = NV_MEM_ACCESS_RW;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	dmaobj->start = args->start;
-	dmaobj->limit = args->limit;
-	dmaobj->conf0 = args->conf0;
 	return ret;
 }
 
@@ -24,6 +24,7 @@
 
 #include <core/gpuobj.h>
 #include <core/class.h>
+#include <nvif/class.h>
 
 #include <subdev/fb.h>
 #include <subdev/vm/nv04.h>
@@ -94,7 +95,7 @@ nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 
 	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
 	*pobject = nv_object(priv);
-	if (ret)
+	if (ret || (ret = -ENOSYS, size))
 		return ret;
 
 	if (priv->base.target == NV_MEM_TARGET_VM) {
@@ -145,9 +146,9 @@ nv04_dmaobj_ofuncs = {
 
 static struct nouveau_oclass
 nv04_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY_CLASS, &nv04_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY_CLASS, &nv04_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY_CLASS, &nv04_dmaobj_ofuncs },
+	{ NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs },
+	{ NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs },
+	{ NV_DMA_IN_MEMORY, &nv04_dmaobj_ofuncs },
 	{}
 };
 
@@ -22,8 +22,11 @@
  * Authors: Ben Skeggs
  */
 
+#include <core/client.h>
 #include <core/gpuobj.h>
 #include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
 
 #include <subdev/fb.h>
 
@@ -90,10 +93,11 @@ nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		 struct nouveau_object **pobject)
 {
 	struct nouveau_dmaeng *dmaeng = (void *)engine;
-	struct nv50_dmaobj_priv *priv;
 	union {
-		u32 conf0;
+		struct nv50_dma_v0 v0;
 	} *args;
+	struct nv50_dmaobj_priv *priv;
+	u32 user, part, comp, kind;
 	int ret;
 
 	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
@@ -102,24 +106,36 @@ nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 	args = data;
 
-	if (!(args->conf0 & NV50_DMA_CONF0_ENABLE)) {
-		if (priv->base.target == NV_MEM_TARGET_VM) {
-			args->conf0 = NV50_DMA_CONF0_PRIV_VM;
-			args->conf0 |= NV50_DMA_CONF0_PART_VM;
-			args->conf0 |= NV50_DMA_CONF0_COMP_VM;
-			args->conf0 |= NV50_DMA_CONF0_TYPE_VM;
+	nv_ioctl(parent, "create nv50 dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nv_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
+				 "comp %d kind %02x\n", args->v0.version,
+			 args->v0.priv, args->v0.part, args->v0.comp,
+			 args->v0.kind);
+		user = args->v0.priv;
+		part = args->v0.part;
+		comp = args->v0.comp;
+		kind = args->v0.kind;
+	} else
+	if (size == 0) {
+		if (priv->base.target != NV_MEM_TARGET_VM) {
+			user = NV50_DMA_V0_PRIV_US;
+			part = NV50_DMA_V0_PART_256;
+			comp = NV50_DMA_V0_COMP_NONE;
+			kind = NV50_DMA_V0_KIND_PITCH;
 		} else {
-			args->conf0 = NV50_DMA_CONF0_PRIV_US;
-			args->conf0 |= NV50_DMA_CONF0_PART_256;
-			args->conf0 |= NV50_DMA_CONF0_COMP_NONE;
-			args->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
+			user = NV50_DMA_V0_PRIV_VM;
+			part = NV50_DMA_V0_PART_VM;
+			comp = NV50_DMA_V0_COMP_VM;
+			kind = NV50_DMA_V0_KIND_VM;
 		}
-	}
+	} else
+		return ret;
 
-	priv->flags0 |= (args->conf0 & NV50_DMA_CONF0_COMP) << 22;
-	priv->flags0 |= (args->conf0 & NV50_DMA_CONF0_TYPE) << 22;
-	priv->flags0 |= (args->conf0 & NV50_DMA_CONF0_PRIV);
-	priv->flags5 |= (args->conf0 & NV50_DMA_CONF0_PART);
+	if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
+		return -EINVAL;
+	priv->flags0 = (comp << 29) | (kind << 22) | (user << 20);
+	priv->flags5 = (part << 16);
 
 	switch (priv->base.target) {
 	case NV_MEM_TARGET_VM:
@@ -165,9 +181,9 @@ nv50_dmaobj_ofuncs = {
 
 static struct nouveau_oclass
 nv50_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY_CLASS, &nv50_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY_CLASS, &nv50_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY_CLASS, &nv50_dmaobj_ofuncs },
+	{ NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs },
+	{ NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs },
+	{ NV_DMA_IN_MEMORY, &nv50_dmaobj_ofuncs },
 	{}
 };
 
@@ -22,9 +22,12 @@
  * Authors: Ben Skeggs
 */
 
+#include <core/client.h>
 #include <core/device.h>
 #include <core/gpuobj.h>
 #include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
 
 #include <subdev/fb.h>
 
@@ -76,10 +79,11 @@ nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		 struct nouveau_object **pobject)
 {
 	struct nouveau_dmaeng *dmaeng = (void *)engine;
-	struct nvc0_dmaobj_priv *priv;
 	union {
-		u32 conf0;
+		struct gf100_dma_v0 v0;
 	} *args;
+	struct nvc0_dmaobj_priv *priv;
+	u32 kind, user, unkn;
 	int ret;
 
 	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
@@ -88,20 +92,31 @@ nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 	args = data;
 
-	if (!(args->conf0 & NVC0_DMA_CONF0_ENABLE)) {
-		if (priv->base.target == NV_MEM_TARGET_VM) {
-			args->conf0 = NVC0_DMA_CONF0_PRIV_VM;
-			args->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
+	nv_ioctl(parent, "create gf100 dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nv_ioctl(parent, "create gf100 dma vers %d priv %d kind %02x\n",
+			 args->v0.version, args->v0.priv, args->v0.kind);
+		kind = args->v0.kind;
+		user = args->v0.priv;
+		unkn = 0;
+	} else
+	if (size == 0) {
+		if (priv->base.target != NV_MEM_TARGET_VM) {
+			kind = GF100_DMA_V0_KIND_PITCH;
+			user = GF100_DMA_V0_PRIV_US;
+			unkn = 2;
 		} else {
-			args->conf0 = NVC0_DMA_CONF0_PRIV_US;
-			args->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
-			args->conf0 |= 0x00020000;
+			kind = GF100_DMA_V0_KIND_VM;
+			user = GF100_DMA_V0_PRIV_VM;
+			unkn = 0;
 		}
-	}
+	} else
+		return ret;
 
-	priv->flags0 |= (args->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
-	priv->flags0 |= (args->conf0 & NVC0_DMA_CONF0_PRIV);
-	priv->flags5 |= (args->conf0 & NVC0_DMA_CONF0_UNKN);
+	if (user > 2)
+		return -EINVAL;
+	priv->flags0 |= (kind << 22) | (user << 20);
+	priv->flags5 |= (unkn << 16);
 
 	switch (priv->base.target) {
 	case NV_MEM_TARGET_VM:
@@ -145,9 +160,9 @@ nvc0_dmaobj_ofuncs = {
 
 static struct nouveau_oclass
 nvc0_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY_CLASS, &nvc0_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY_CLASS, &nvc0_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY_CLASS, &nvc0_dmaobj_ofuncs },
+	{ NV_DMA_FROM_MEMORY, &nvc0_dmaobj_ofuncs },
+	{ NV_DMA_TO_MEMORY, &nvc0_dmaobj_ofuncs },
+	{ NV_DMA_IN_MEMORY, &nvc0_dmaobj_ofuncs },
 	{}
 };
 
@@ -22,9 +22,12 @@
  * Authors: Ben Skeggs
 */
 
+#include <core/client.h>
 #include <core/device.h>
 #include <core/gpuobj.h>
 #include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
 
 #include <subdev/fb.h>
 
@@ -83,10 +86,11 @@ nvd0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		 struct nouveau_object **pobject)
 {
 	struct nouveau_dmaeng *dmaeng = (void *)engine;
-	struct nvd0_dmaobj_priv *priv;
 	union {
-		u32 conf0;
+		struct gf110_dma_v0 v0;
 	} *args;
+	struct nvd0_dmaobj_priv *priv;
+	u32 kind, page;
 	int ret;
 
 	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
@@ -95,18 +99,27 @@ nvd0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 	args = data;
 
-	if (!(args->conf0 & NVD0_DMA_CONF0_ENABLE)) {
-		if (priv->base.target == NV_MEM_TARGET_VM) {
-			args->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
-			args->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
+	nv_ioctl(parent, "create gf110 dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nv_ioctl(parent, "create gf100 dma vers %d page %d kind %02x\n",
+			 args->v0.version, args->v0.page, args->v0.kind);
+		kind = args->v0.kind;
+		page = args->v0.page;
+	} else
+	if (size == 0) {
+		if (priv->base.target != NV_MEM_TARGET_VM) {
+			kind = GF110_DMA_V0_KIND_PITCH;
+			page = GF110_DMA_V0_PAGE_SP;
 		} else {
-			args->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
-			args->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
+			kind = GF110_DMA_V0_KIND_VM;
+			page = GF110_DMA_V0_PAGE_LP;
 		}
-	}
+	} else
+		return ret;
 
-	priv->flags0 |= (args->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
-	priv->flags0 |= (args->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
+	if (page > 1)
+		return -EINVAL;
+	priv->flags0 = (kind << 20) | (page << 6);
 
 	switch (priv->base.target) {
 	case NV_MEM_TARGET_VRAM:
@@ -138,9 +151,9 @@ nvd0_dmaobj_ofuncs = {
 
 static struct nouveau_oclass
 nvd0_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY_CLASS, &nvd0_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY_CLASS, &nvd0_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY_CLASS, &nvd0_dmaobj_ofuncs },
+	{ NV_DMA_FROM_MEMORY, &nvd0_dmaobj_ofuncs },
+	{ NV_DMA_TO_MEMORY, &nvd0_dmaobj_ofuncs },
+	{ NV_DMA_IN_MEMORY, &nvd0_dmaobj_ofuncs },
 	{}
 };
 
@@ -76,8 +76,8 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
 
 	dmaeng = (void *)chan->pushdma->base.engine;
 	switch (chan->pushdma->base.oclass->handle) {
-	case NV_DMA_FROM_MEMORY_CLASS:
-	case NV_DMA_IN_MEMORY_CLASS:
+	case NV_DMA_FROM_MEMORY:
+	case NV_DMA_IN_MEMORY:
 		break;
 	default:
 		return -EINVAL;
@@ -3,73 +3,6 @@
 
 #include <nvif/class.h>
 
-/* DMA object classes
- *
- * 0002: NV_DMA_FROM_MEMORY
- * 0003: NV_DMA_TO_MEMORY
- * 003d: NV_DMA_IN_MEMORY
- */
-#define NV_DMA_FROM_MEMORY_CLASS 0x00000002
-#define NV_DMA_TO_MEMORY_CLASS 0x00000003
-#define NV_DMA_IN_MEMORY_CLASS 0x0000003d
-
-#define NV_DMA_TARGET_MASK 0x000000ff
-#define NV_DMA_TARGET_VM 0x00000000
-#define NV_DMA_TARGET_VRAM 0x00000001
-#define NV_DMA_TARGET_PCI 0x00000002
-#define NV_DMA_TARGET_PCI_US 0x00000003
-#define NV_DMA_TARGET_AGP 0x00000004
-#define NV_DMA_ACCESS_MASK 0x00000f00
-#define NV_DMA_ACCESS_VM 0x00000000
-#define NV_DMA_ACCESS_RD 0x00000100
-#define NV_DMA_ACCESS_WR 0x00000200
-#define NV_DMA_ACCESS_RDWR 0x00000300
-
-/* NV50:NVC0 */
-#define NV50_DMA_CONF0_ENABLE 0x80000000
-#define NV50_DMA_CONF0_PRIV 0x00300000
-#define NV50_DMA_CONF0_PRIV_VM 0x00000000
-#define NV50_DMA_CONF0_PRIV_US 0x00100000
-#define NV50_DMA_CONF0_PRIV__S 0x00200000
-#define NV50_DMA_CONF0_PART 0x00030000
-#define NV50_DMA_CONF0_PART_VM 0x00000000
-#define NV50_DMA_CONF0_PART_256 0x00010000
-#define NV50_DMA_CONF0_PART_1KB 0x00020000
-#define NV50_DMA_CONF0_COMP 0x00000180
-#define NV50_DMA_CONF0_COMP_NONE 0x00000000
-#define NV50_DMA_CONF0_COMP_VM 0x00000180
-#define NV50_DMA_CONF0_TYPE 0x0000007f
-#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000
-#define NV50_DMA_CONF0_TYPE_VM 0x0000007f
-
-/* NVC0:NVD9 */
-#define NVC0_DMA_CONF0_ENABLE 0x80000000
-#define NVC0_DMA_CONF0_PRIV 0x00300000
-#define NVC0_DMA_CONF0_PRIV_VM 0x00000000
-#define NVC0_DMA_CONF0_PRIV_US 0x00100000
-#define NVC0_DMA_CONF0_PRIV__S 0x00200000
-#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000
-#define NVC0_DMA_CONF0_TYPE 0x000000ff
-#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000
-#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff
-
-/* NVD9- */
-#define NVD0_DMA_CONF0_ENABLE 0x80000000
-#define NVD0_DMA_CONF0_PAGE 0x00000400
-#define NVD0_DMA_CONF0_PAGE_LP 0x00000000
-#define NVD0_DMA_CONF0_PAGE_SP 0x00000400
-#define NVD0_DMA_CONF0_TYPE 0x000000ff
-#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000
-#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff
-
-struct nv_dma_class {
-	u32 flags;
-	u32 pad0;
-	u64 start;
-	u64 limit;
-	u32 conf0;
-};
-
 /* Perfmon counter class
  *
  * XXXX: NV_PERFCTR
@@ -12,7 +12,6 @@ struct nouveau_dmaobj {
 	u32 access;
 	u64 start;
 	u64 limit;
-	u32 conf0;
 };
 
 struct nouveau_dmaeng {
@@ -413,7 +413,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 	struct {
 		struct nvif_ioctl_v0 ioctl;
 		struct nvif_ioctl_new_v0 new;
-		struct nv_dma_class ctxdma;
+		struct nv_dma_v0 ctxdma;
 	} args = {
 		.ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
 		.ioctl.type = NVIF_IOCTL_V0_NEW,
@@ -423,7 +423,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 		.ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel),
 		.new.route = NVDRM_OBJECT_ABI16,
 		.new.handle = info->handle,
-		.new.oclass = NV_DMA_IN_MEMORY_CLASS,
+		.new.oclass = NV_DMA_IN_MEMORY,
 	};
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
@@ -460,17 +460,20 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 	args.ctxdma.start = ntfy->node->offset;
 	args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1;
 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		args.ctxdma.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+		args.ctxdma.target = NV_DMA_V0_TARGET_VM;
+		args.ctxdma.access = NV_DMA_V0_ACCESS_VM;
 		args.ctxdma.start += chan->ntfy_vma.offset;
 		args.ctxdma.limit += chan->ntfy_vma.offset;
 	} else
 	if (drm->agp.stat == ENABLED) {
-		args.ctxdma.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+		args.ctxdma.target = NV_DMA_V0_TARGET_AGP;
+		args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
 		args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset;
 		args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset;
 		client->super = true;
 	} else {
-		args.ctxdma.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+		args.ctxdma.target = NV_DMA_V0_TARGET_VM;
+		args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
 		args.ctxdma.start += chan->ntfy->bo.offset;
 		args.ctxdma.limit += chan->ntfy->bo.offset;
 	}
@@ -91,7 +91,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 	struct nouveau_instmem *imem = nvkm_instmem(device);
 	struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
 	struct nouveau_fb *pfb = nvkm_fb(device);
-	struct nv_dma_class args = {};
+	struct nv_dma_v0 args = {};
 	struct nouveau_channel *chan;
 	u32 target;
 	int ret;
@@ -135,7 +135,8 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 			return ret;
 		}
 
-		args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+		args.target = NV_DMA_V0_TARGET_VM;
+		args.access = NV_DMA_V0_ACCESS_VM;
 		args.start = 0;
 		args.limit = cli->vm->vmm->limit - 1;
 	} else
@@ -146,29 +147,33 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 			 * the framebuffer bar rather than direct vram access..
 			 * nfi why this exists, it came from the -nv ddx.
 			 */
-			args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
+			args.target = NV_DMA_V0_TARGET_PCI;
+			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = nv_device_resource_start(nvkm_device(device), 1);
 			args.limit = args.start + limit;
 		} else {
-			args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
+			args.target = NV_DMA_V0_TARGET_VRAM;
+			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = 0;
 			args.limit = limit;
 		}
 	} else {
 		if (chan->drm->agp.stat == ENABLED) {
-			args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+			args.target = NV_DMA_V0_TARGET_AGP;
+			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = chan->drm->agp.base;
 			args.limit = chan->drm->agp.base +
 				     chan->drm->agp.size - 1;
 		} else {
-			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+			args.target = NV_DMA_V0_TARGET_VM;
+			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = 0;
 			args.limit = vmm->limit - 1;
 		}
 	}
 
 	ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
-			       (handle & 0xffff), NV_DMA_FROM_MEMORY_CLASS,
+			       (handle & 0xffff), NV_DMA_FROM_MEMORY,
 			       &args, sizeof(args), &chan->push.ctxdma);
 	if (ret) {
 		nouveau_channel_del(pchan);
@@ -259,45 +264,50 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 	struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
 	struct nouveau_fb *pfb = nvkm_fb(device);
 	struct nouveau_software_chan *swch;
-	struct nv_dma_class args = {};
+	struct nv_dma_v0 args = {};
 	int ret, i;
 
 	/* allocate dma objects to cover all allowed vram, and gart */
 	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
 		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+			args.target = NV_DMA_V0_TARGET_VM;
+			args.access = NV_DMA_V0_ACCESS_VM;
 			args.start = 0;
 			args.limit = cli->vm->vmm->limit - 1;
 		} else {
-			args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
+			args.target = NV_DMA_V0_TARGET_VRAM;
+			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = 0;
 			args.limit = pfb->ram->size - imem->reserved - 1;
 		}
 
 		ret = nvif_object_init(chan->object, NULL, vram,
-				       NV_DMA_IN_MEMORY_CLASS, &args,
+				       NV_DMA_IN_MEMORY, &args,
 				       sizeof(args), &chan->vram);
 		if (ret)
 			return ret;
 
 		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+			args.target = NV_DMA_V0_TARGET_VM;
+			args.access = NV_DMA_V0_ACCESS_VM;
 			args.start = 0;
 			args.limit = cli->vm->vmm->limit - 1;
 		} else
 		if (chan->drm->agp.stat == ENABLED) {
-			args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+			args.target = NV_DMA_V0_TARGET_AGP;
+			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = chan->drm->agp.base;
 			args.limit = chan->drm->agp.base +
 				     chan->drm->agp.size - 1;
 		} else {
-			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+			args.target = NV_DMA_V0_TARGET_VM;
+			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = 0;
 			args.limit = vmm->limit - 1;
 		}
 
 		ret = nvif_object_init(chan->object, NULL, gart,
-				       NV_DMA_IN_MEMORY_CLASS, &args,
+				       NV_DMA_IN_MEMORY, &args,
 				       sizeof(args), &chan->gart);
 		if (ret)
 			return ret;
@@ -257,13 +257,13 @@ nouveau_accel_init(struct nouveau_drm *drm)
 	}
 
 	ret = nvif_object_init(drm->channel->object, NULL, NvNotify0,
-			       NV_DMA_IN_MEMORY_CLASS,
-			       &(struct nv_dma_class) {
-					.flags = NV_DMA_TARGET_VRAM |
-						 NV_DMA_ACCESS_RDWR,
+			       NV_DMA_IN_MEMORY,
+			       &(struct nv_dma_v0) {
+					.target = NV_DMA_V0_TARGET_VRAM,
+					.access = NV_DMA_V0_ACCESS_RDWR,
 					.start = drm->notify->addr,
 					.limit = drm->notify->addr + 31
-			       }, sizeof(struct nv_dma_class),
+			       }, sizeof(struct nv_dma_v0),
 			       &drm->ntfy);
 	if (ret) {
 		nouveau_accel_fini(drm);
@@ -89,14 +89,13 @@ nv17_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.read = nv10_fence_read;
 	fctx->base.sync = nv17_fence_sync;
 
-	ret = nvif_object_init(chan->object, NULL, NvSema,
-			       NV_DMA_FROM_MEMORY_CLASS,
-			       &(struct nv_dma_class) {
-					.flags = NV_DMA_TARGET_VRAM |
-						 NV_DMA_ACCESS_RDWR,
+	ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_FROM_MEMORY,
+			       &(struct nv_dma_v0) {
+					.target = NV_DMA_V0_TARGET_VRAM,
+					.access = NV_DMA_V0_ACCESS_RDWR,
 					.start = start,
 					.limit = limit,
-			       }, sizeof(struct nv_dma_class),
+			       }, sizeof(struct nv_dma_v0),
 			       &fctx->sema);
 	if (ret)
 		nv10_fence_context_del(chan);
@@ -160,13 +160,13 @@ nv50_dmac_create(struct nvif_object *disp, u32 bclass, u8 head,
 		return -ENOMEM;
 
 	ret = nvif_object_init(nvif_object(nvif_device(disp)), NULL, handle,
-			       NV_DMA_FROM_MEMORY_CLASS,
-			       &(struct nv_dma_class) {
-					.flags = NV_DMA_TARGET_PCI_US |
-						 NV_DMA_ACCESS_RD,
+			       NV_DMA_FROM_MEMORY,
+			       &(struct nv_dma_v0) {
+					.target = NV_DMA_V0_TARGET_PCI_US,
+					.access = NV_DMA_V0_ACCESS_RD,
 					.start = dmac->handle + 0x0000,
 					.limit = dmac->handle + 0x0fff,
-			       }, sizeof(struct nv_dma_class), &pushbuf);
+			       }, sizeof(struct nv_dma_v0), &pushbuf);
 	if (ret)
 		return ret;
 
@@ -176,25 +176,25 @@ nv50_dmac_create(struct nvif_object *disp, u32 bclass, u8 head,
 		return ret;
 
 	ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000000,
-			       NV_DMA_IN_MEMORY_CLASS,
-			       &(struct nv_dma_class) {
-					.flags = NV_DMA_TARGET_VRAM |
-						 NV_DMA_ACCESS_RDWR,
+			       NV_DMA_IN_MEMORY,
+			       &(struct nv_dma_v0) {
+					.target = NV_DMA_V0_TARGET_VRAM,
+					.access = NV_DMA_V0_ACCESS_RDWR,
 					.start = syncbuf + 0x0000,
 					.limit = syncbuf + 0x0fff,
-			       }, sizeof(struct nv_dma_class),
+			       }, sizeof(struct nv_dma_v0),
 			       &dmac->sync);
 	if (ret)
 		return ret;
 
 	ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001,
-			       NV_DMA_IN_MEMORY_CLASS,
-			       &(struct nv_dma_class) {
-					.flags = NV_DMA_TARGET_VRAM |
-						 NV_DMA_ACCESS_RDWR,
+			       NV_DMA_IN_MEMORY,
+			       &(struct nv_dma_v0) {
+					.target = NV_DMA_V0_TARGET_VRAM,
+					.access = NV_DMA_V0_ACCESS_RDWR,
 					.start = 0,
 					.limit = pfb->ram->size - 1,
-			       }, sizeof(struct nv_dma_class),
+			       }, sizeof(struct nv_dma_v0),
 			       &dmac->vram);
 	if (ret)
 		return ret;
@@ -2073,9 +2073,17 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv50_disp *disp = nv50_disp(dev);
 	struct nv50_mast *mast = nv50_mast(dev);
-	struct nv_dma_class args;
+	struct __attribute__ ((packed)) {
+		struct nv_dma_v0 base;
+		union {
+			struct nv50_dma_v0 nv50;
+			struct gf100_dma_v0 gf100;
+			struct gf110_dma_v0 gf110;
+		};
+	} args = {};
 	struct nv50_fbdma *fbdma;
 	struct drm_crtc *crtc;
+	u32 size = sizeof(args.base);
 	int ret;
 
 	list_for_each_entry(fbdma, &disp->fbdma, head) {
@@ -2088,31 +2096,33 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
 		return -ENOMEM;
 	list_add(&fbdma->head, &disp->fbdma);
 
-	args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
-	args.start = offset;
-	args.limit = offset + length - 1;
-	args.conf0 = kind;
+	args.base.target = NV_DMA_V0_TARGET_VRAM;
+	args.base.access = NV_DMA_V0_ACCESS_RDWR;
+	args.base.start = offset;
+	args.base.limit = offset + length - 1;
 
 	if (drm->device.info.chipset < 0x80) {
-		args.conf0 = NV50_DMA_CONF0_ENABLE;
-		args.conf0 |= NV50_DMA_CONF0_PART_256;
+		args.nv50.part = NV50_DMA_V0_PART_256;
+		size += sizeof(args.nv50);
 	} else
 	if (drm->device.info.chipset < 0xc0) {
-		args.conf0 |= NV50_DMA_CONF0_ENABLE;
-		args.conf0 |= NV50_DMA_CONF0_PART_256;
+		args.nv50.part = NV50_DMA_V0_PART_256;
+		args.nv50.kind = kind;
+		size += sizeof(args.nv50);
	} else
 	if (drm->device.info.chipset < 0xd0) {
-		args.conf0 |= NVC0_DMA_CONF0_ENABLE;
+		args.gf100.kind = kind;
+		size += sizeof(args.gf100);
 	} else {
-		args.conf0 |= NVD0_DMA_CONF0_ENABLE;
-		args.conf0 |= NVD0_DMA_CONF0_PAGE_LP;
+		args.gf110.page = GF110_DMA_V0_PAGE_LP;
+		args.gf110.kind = kind;
+		size += sizeof(args.gf110);
 	}
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nv50_head *head = nv50_head(crtc);
 		int ret = nvif_object_init(&head->sync.base.base.user, NULL,
-					    name, NV_DMA_IN_MEMORY_CLASS,
-					    &args, sizeof(args),
+					    name, NV_DMA_IN_MEMORY, &args, size,
 					    &fbdma->base[head->base.index]);
 		if (ret) {
 			nv50_fbdma_fini(fbdma);
@@ -2121,7 +2131,7 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
 	}
 
 	ret = nvif_object_init(&mast->base.base.user, NULL, name,
-			       NV_DMA_IN_MEMORY_CLASS, &args, sizeof(args),
+			       NV_DMA_IN_MEMORY, &args, size,
 			       &fbdma->core);
 	if (ret) {
 		nv50_fbdma_fini(fbdma);
@@ -51,14 +51,13 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.read = nv10_fence_read;
 	fctx->base.sync = nv17_fence_sync;
 
-	ret = nvif_object_init(chan->object, NULL, NvSema,
-			       NV_DMA_IN_MEMORY_CLASS,
-			       &(struct nv_dma_class) {
-					.flags = NV_DMA_TARGET_VRAM |
-						 NV_DMA_ACCESS_RDWR,
+	ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_IN_MEMORY,
+			       &(struct nv_dma_v0) {
+					.target = NV_DMA_V0_TARGET_VRAM,
+					.access = NV_DMA_V0_ACCESS_RDWR,
 					.start = start,
 					.limit = limit,
-			       }, sizeof(struct nv_dma_class),
+			       }, sizeof(struct nv_dma_v0),
 			       &fctx->sema);
 
 	/* dma objects for display sync channel semaphore blocks */
@@ -68,13 +67,12 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 		u32 limit = start + bo->bo.mem.size - 1;
 
 		ret = nvif_object_init(chan->object, NULL, NvEvoSema0 + i,
-				       NV_DMA_IN_MEMORY_CLASS,
-				       &(struct nv_dma_class) {
-						.flags = NV_DMA_TARGET_VRAM |
-							 NV_DMA_ACCESS_RDWR,
+				       NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
+						.target = NV_DMA_V0_TARGET_VRAM,
+						.access = NV_DMA_V0_ACCESS_RDWR,
 						.start = start,
 						.limit = limit,
-				       }, sizeof(struct nv_dma_class),
+				       }, sizeof(struct nv_dma_v0),
 				       &fctx->head[i]);
 	}
 
@@ -8,6 +8,10 @@
 /* the below match nvidia-assigned (either in hw, or sw) class numbers */
 #define NV_DEVICE 0x00000080
 
+#define NV_DMA_FROM_MEMORY 0x00000002
+#define NV_DMA_TO_MEMORY 0x00000003
+#define NV_DMA_IN_MEMORY 0x0000003d
+
 
 /*******************************************************************************
  * client
@@ -79,4 +83,72 @@ struct nv_device_info_v0 {
 	__u64 ram_user;
 };
 
+
+/*******************************************************************************
+ * context dma
+ ******************************************************************************/
+
+struct nv_dma_v0 {
+	__u8 version;
+#define NV_DMA_V0_TARGET_VM 0x00
+#define NV_DMA_V0_TARGET_VRAM 0x01
+#define NV_DMA_V0_TARGET_PCI 0x02
+#define NV_DMA_V0_TARGET_PCI_US 0x03
+#define NV_DMA_V0_TARGET_AGP 0x04
+	__u8 target;
+#define NV_DMA_V0_ACCESS_VM 0x00
+#define NV_DMA_V0_ACCESS_RD 0x01
+#define NV_DMA_V0_ACCESS_WR 0x02
+#define NV_DMA_V0_ACCESS_RDWR (NV_DMA_V0_ACCESS_RD | NV_DMA_V0_ACCESS_WR)
+	__u8 access;
+	__u8 pad03[5];
+	__u64 start;
+	__u64 limit;
+	/* ... chipset-specific class data */
+};
+
+struct nv50_dma_v0 {
+	__u8 version;
+#define NV50_DMA_V0_PRIV_VM 0x00
+#define NV50_DMA_V0_PRIV_US 0x01
+#define NV50_DMA_V0_PRIV__S 0x02
+	__u8 priv;
+#define NV50_DMA_V0_PART_VM 0x00
+#define NV50_DMA_V0_PART_256 0x01
+#define NV50_DMA_V0_PART_1KB 0x02
+	__u8 part;
+#define NV50_DMA_V0_COMP_NONE 0x00
+#define NV50_DMA_V0_COMP_1 0x01
+#define NV50_DMA_V0_COMP_2 0x02
+#define NV50_DMA_V0_COMP_VM 0x03
+	__u8 comp;
+#define NV50_DMA_V0_KIND_PITCH 0x00
+#define NV50_DMA_V0_KIND_VM 0x7f
+	__u8 kind;
+	__u8 pad05[3];
+};
+
+struct gf100_dma_v0 {
+	__u8 version;
+#define GF100_DMA_V0_PRIV_VM 0x00
+#define GF100_DMA_V0_PRIV_US 0x01
+#define GF100_DMA_V0_PRIV__S 0x02
+	__u8 priv;
+#define GF100_DMA_V0_KIND_PITCH 0x00
+#define GF100_DMA_V0_KIND_VM 0xff
+	__u8 kind;
+	__u8 pad03[5];
+};
+
+struct gf110_dma_v0 {
+	__u8 version;
+#define GF110_DMA_V0_PAGE_LP 0x00
+#define GF110_DMA_V0_PAGE_SP 0x01
+	__u8 page;
+#define GF110_DMA_V0_KIND_PITCH 0x00
+#define GF110_DMA_V0_KIND_VM 0xff
+	__u8 kind;
+	__u8 pad03[5];
+};
+
 #endif
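A closing note on the new uAPI above: the "chipset-specific class data" that nv_dma_v0 mentions is passed by appending one of the per-chipset structs (nv50_dma_v0, gf100_dma_v0 or gf110_dma_v0) straight after the base struct and sizing the request to cover only what was actually filled in, as nv50_fbdma_init() does in the diff. The sketch below shows only the NV50 case and is illustrative, not part of the patch; the function name and parameters are placeholders.

#include <nvif/class.h>

/* sketch: base nv_dma_v0 plus an NV50-specific trailer, variable-size request */
static int
example_fbdma_new(struct nvif_object *parent, u32 handle,
		  u64 offset, u64 length, u8 kind,
		  struct nvif_object *fbdma)
{
	struct __attribute__ ((packed)) {
		struct nv_dma_v0 base;
		struct nv50_dma_v0 nv50;
	} args = {};
	u32 size = sizeof(args.base);

	args.base.target = NV_DMA_V0_TARGET_VRAM;
	args.base.access = NV_DMA_V0_ACCESS_RDWR;
	args.base.start = offset;
	args.base.limit = offset + length - 1;

	/* append the NV50 trailer and grow the request size to match;
	 * a gf100/gf110 caller would append its own struct here instead
	 */
	args.nv50.part = NV50_DMA_V0_PART_256;
	args.nv50.kind = kind;
	size += sizeof(args.nv50);

	return nvif_object_init(parent, NULL, handle, NV_DMA_IN_MEMORY,
				&args, size, fbdma);
}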