drm/nouveau: move engine object creation into per-engine hooks
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 475feffabe
commit 4ea52f8974
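Before this change, nouveau_gpuobj_gr_new() built the instance object for every engine itself (sizing it via nouveau_gpuobj_class_instmem_size() and poking in the class-specific words); after it, the core only picks the engine for the requested class and calls that engine's new object_new() hook, so graph, crypt and the software path each create and RAMHT-insert their own objects. A minimal stand-alone sketch of that hook-dispatch pattern follows; the demo_* names and the sample handle/class values are illustrative stand-ins, not the driver's real structures or API.

/*
 * Sketch only: models the hook dispatch this commit introduces.
 * demo_channel/demo_engine stand in for nouveau_channel and the
 * per-engine structs; they are not the real driver types.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_channel {
	int id;
};

struct demo_engine {
	const char *name;
	/* counterpart of the object_new hook added to the engine structs */
	int (*object_new)(struct demo_channel *chan, uint32_t handle,
			  uint16_t class);
};

static int demo_gr_object_new(struct demo_channel *chan, uint32_t handle,
			      uint16_t class)
{
	/* a real hook would allocate a gpuobj, fill its instance
	 * memory for 'class' and insert it into the channel's RAMHT */
	printf("GR: ch%d handle=0x%08x class=0x%04x\n",
	       chan->id, (unsigned)handle, (unsigned)class);
	return 0;
}

static int demo_crypt_object_new(struct demo_channel *chan, uint32_t handle,
				 uint16_t class)
{
	printf("CRYPT: ch%d handle=0x%08x class=0x%04x\n",
	       chan->id, (unsigned)handle, (unsigned)class);
	return 0;
}

static const struct demo_engine demo_engines[] = {
	{ "graph", demo_gr_object_new },
	{ "crypt", demo_crypt_object_new },
};

/* plays the role of nouveau_gpuobj_gr_new(): the core no longer knows how
 * to build the object, it only routes the call to the right engine (the
 * real code chooses the engine by looking the class up in a list) */
static int demo_object_new(struct demo_channel *chan, int engine,
			   uint32_t handle, uint16_t class)
{
	return demo_engines[engine].object_new(chan, handle, class);
}

int main(void)
{
	struct demo_channel chan = { .id = 3 };

	demo_object_new(&chan, 0, 0xbeef0001, 0x5039);	/* graph object */
	demo_object_new(&chan, 1, 0xbeef0002, 0x74c1);	/* crypt object */
	return 0;
}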
@@ -383,6 +383,7 @@ struct nouveau_pgraph_engine {
	void (*destroy_context)(struct nouveau_channel *);
	int (*load_context)(struct nouveau_channel *);
	int (*unload_context)(struct drm_device *);
	int (*object_new)(struct nouveau_channel *chan, u32 handle, u16 class);
	void (*tlb_flush)(struct drm_device *dev);

	void (*set_tile_region)(struct drm_device *dev, int i);

@@ -507,6 +508,7 @@ struct nouveau_crypt_engine {
	void (*takedown)(struct drm_device *);
	int (*create_context)(struct nouveau_channel *);
	void (*destroy_context)(struct nouveau_channel *);
	int (*object_new)(struct nouveau_channel *, u32 handle, u16 class);
	void (*tlb_flush)(struct drm_device *dev);
};

@@ -1147,6 +1149,7 @@ extern int nv04_graph_create_context(struct nouveau_channel *);
extern void nv04_graph_destroy_context(struct nouveau_channel *);
extern int nv04_graph_load_context(struct nouveau_channel *);
extern int nv04_graph_unload_context(struct drm_device *);
extern int nv04_graph_object_new(struct nouveau_channel *, u32, u16);
extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
				     u32 class, u32 mthd, u32 data);
extern struct nouveau_bitfield nv04_graph_nsource[];

@@ -1181,6 +1184,7 @@ extern int nv40_graph_create_context(struct nouveau_channel *);
extern void nv40_graph_destroy_context(struct nouveau_channel *);
extern int nv40_graph_load_context(struct nouveau_channel *);
extern int nv40_graph_unload_context(struct drm_device *);
extern int nv40_graph_object_new(struct nouveau_channel *, u32, u16);
extern void nv40_grctx_init(struct nouveau_grctx *);
extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);

@@ -1193,6 +1197,7 @@ extern int nv50_graph_create_context(struct nouveau_channel *);
extern void nv50_graph_destroy_context(struct nouveau_channel *);
extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
extern int nv50_graph_object_new(struct nouveau_channel *, u32, u16);
extern int nv50_grctx_init(struct nouveau_grctx *);
extern void nv50_graph_tlb_flush(struct drm_device *dev);
extern void nv84_graph_tlb_flush(struct drm_device *dev);

@@ -1207,6 +1212,7 @@ extern int nvc0_graph_create_context(struct nouveau_channel *);
extern void nvc0_graph_destroy_context(struct nouveau_channel *);
extern int nvc0_graph_load_context(struct nouveau_channel *);
extern int nvc0_graph_unload_context(struct drm_device *);
extern int nvc0_graph_object_new(struct nouveau_channel *, u32, u16);

/* nv84_crypt.c */
extern int nv84_crypt_init(struct drm_device *dev);

@@ -1214,6 +1220,7 @@ extern void nv84_crypt_fini(struct drm_device *dev);
extern int nv84_crypt_create_context(struct nouveau_channel *);
extern void nv84_crypt_destroy_context(struct nouveau_channel *);
extern void nv84_crypt_tlb_flush(struct drm_device *dev);
extern int nv84_crypt_object_new(struct nouveau_channel *, u32, u16);

/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
@@ -361,20 +361,6 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
	return 0;
}

static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big

@@ -606,11 +592,11 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
   set to 0?
*/
static int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)

@@ -624,17 +610,20 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);
	*gpuobj_ret = gpuobj;
	return 0;

	ret = nouveau_ramht_insert(chan, handle, gpuobj);
	nouveau_gpuobj_ref(NULL, &gpuobj);
	return ret;
}

int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

@@ -650,85 +639,27 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
found:
	switch (oc->engine) {
	case NVOBJ_ENGINE_SW:
		if (dev_priv->card_type < NV_C0) {
			ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
			if (ret)
				return ret;
			goto insert;
		}
		break;
		return nouveau_gpuobj_sw_new(chan, handle, class);
	case NVOBJ_ENGINE_GR:
		if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
		    (dev_priv->card_type < NV_20 && !chan->pgraph_ctx)) {
			struct nouveau_pgraph_engine *pgraph =
				&dev_priv->engine.graph;

			ret = pgraph->create_context(chan);
			if (ret)
				return ret;
		}
		break;

		return pgraph->object_new(chan, handle, class);
	case NVOBJ_ENGINE_CRYPT:
		if (!chan->crypt_ctx) {
			struct nouveau_crypt_engine *pcrypt =
				&dev_priv->engine.crypt;

			ret = pcrypt->create_context(chan);
			if (ret)
				return ret;
		}
		break;

		return pcrypt->object_new(chan, handle, class);
	}

	/* we're done if this is fermi */
	if (dev_priv->card_type >= NV_C0)
		return 0;

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 &gpuobj);
	if (ret) {
		NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		nv_wo32(gpuobj, 0, class);
		nv_wo32(gpuobj, 20, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(gpuobj, 0, 0x00001030);
			nv_wo32(gpuobj, 4, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(gpuobj, 8, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.flush(dev);

	gpuobj->engine = oc->engine;
	gpuobj->class = oc->id;

insert:
	ret = nouveau_ramht_insert(chan, handle, gpuobj);
	if (ret)
		NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
	nouveau_gpuobj_ref(NULL, &gpuobj);
	return ret;
	BUG_ON(1);
}

static int
@@ -73,6 +73,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->graph.destroy_context = nv04_graph_destroy_context;
		engine->graph.load_context = nv04_graph_load_context;
		engine->graph.unload_context = nv04_graph_unload_context;
		engine->graph.object_new = nv04_graph_object_new;
		engine->fifo.channels = 16;
		engine->fifo.init = nv04_fifo_init;
		engine->fifo.takedown = nv04_fifo_fini;

@@ -131,6 +132,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.load_context = nv10_graph_load_context;
		engine->graph.unload_context = nv10_graph_unload_context;
		engine->graph.object_new = nv04_graph_object_new;
		engine->graph.set_tile_region = nv10_graph_set_tile_region;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;

@@ -190,6 +192,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.load_context = nv20_graph_load_context;
		engine->graph.unload_context = nv20_graph_unload_context;
		engine->graph.object_new = nv04_graph_object_new;
		engine->graph.set_tile_region = nv20_graph_set_tile_region;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;

@@ -249,6 +252,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->graph.destroy_context = nv20_graph_destroy_context;
		engine->graph.load_context = nv20_graph_load_context;
		engine->graph.unload_context = nv20_graph_unload_context;
		engine->graph.object_new = nv04_graph_object_new;
		engine->graph.set_tile_region = nv20_graph_set_tile_region;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;

@@ -311,6 +315,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->graph.destroy_context = nv40_graph_destroy_context;
		engine->graph.load_context = nv40_graph_load_context;
		engine->graph.unload_context = nv40_graph_unload_context;
		engine->graph.object_new = nv40_graph_object_new;
		engine->graph.set_tile_region = nv40_graph_set_tile_region;
		engine->fifo.channels = 32;
		engine->fifo.init = nv40_fifo_init;

@@ -376,6 +381,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->graph.destroy_context = nv50_graph_destroy_context;
		engine->graph.load_context = nv50_graph_load_context;
		engine->graph.unload_context = nv50_graph_unload_context;
		engine->graph.object_new = nv50_graph_object_new;
		if (dev_priv->chipset == 0x50 ||
		    dev_priv->chipset == 0xac)
			engine->graph.tlb_flush = nv50_graph_tlb_flush;

@@ -443,6 +449,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
			engine->crypt.takedown = nv84_crypt_fini;
			engine->crypt.create_context = nv84_crypt_create_context;
			engine->crypt.destroy_context = nv84_crypt_destroy_context;
			engine->crypt.object_new = nv84_crypt_object_new;
			engine->crypt.tlb_flush = nv84_crypt_tlb_flush;
			break;
		default:

@@ -480,6 +487,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->graph.destroy_context = nvc0_graph_destroy_context;
		engine->graph.load_context = nvc0_graph_load_context;
		engine->graph.unload_context = nvc0_graph_unload_context;
		engine->graph.object_new = nvc0_graph_object_new;
		engine->fifo.channels = 128;
		engine->fifo.init = nvc0_fifo_init;
		engine->fifo.takedown = nvc0_fifo_takedown;
@@ -28,6 +28,7 @@
#include "nouveau_drv.h"
#include "nouveau_hw.h"
#include "nouveau_util.h"
#include "nouveau_ramht.h"

static int nv04_graph_register(struct drm_device *dev);
static void nv04_graph_isr(struct drm_device *dev);

@@ -481,6 +482,33 @@ nv04_graph_unload_context(struct drm_device *dev)
	return 0;
}

int
nv04_graph_object_new(struct nouveau_channel *chan, u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 1;
	obj->class = class;

#ifdef __BIG_ENDIAN
	nv_wo32(obj, 0x00, 0x00080000 | class);
#else
	nv_wo32(obj, 0x00, class);
#endif
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);

	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}

int nv04_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -28,6 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_grctx.h"
#include "nouveau_ramht.h"

static int nv40_graph_register(struct drm_device *);
static void nv40_graph_isr(struct drm_device *);

@@ -204,6 +205,32 @@ nv40_graph_unload_context(struct drm_device *dev)
	return ret;
}

int
nv40_graph_object_new(struct nouveau_channel *chan, u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 1;
	obj->class = class;

	nv_wo32(obj, 0x00, class);
	nv_wo32(obj, 0x04, 0x00000000);
#ifdef __BIG_ENDIAN
	nv_wo32(obj, 0x08, 0x01000000);
#endif
	nv_wo32(obj, 0x0c, 0x00000000);
	nv_wo32(obj, 0x10, 0x00000000);

	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}

void
nv40_graph_set_tile_region(struct drm_device *dev, int i)
{
@@ -31,6 +31,7 @@
#include "nouveau_grctx.h"
#include "nouveau_dma.h"
#include "nouveau_vm.h"
#include "nouveau_ramht.h"
#include "nv50_evo.h"

static int nv50_graph_register(struct drm_device *);

@@ -364,6 +365,31 @@ nv50_graph_unload_context(struct drm_device *dev)
	return 0;
}

int
nv50_graph_object_new(struct nouveau_channel *chan, u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 1;
	obj->class = class;

	nv_wo32(obj, 0x00, class);
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	dev_priv->engine.instmem.flush(dev);

	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}

static void
nv50_graph_context_switch(struct drm_device *dev)
{
@@ -26,6 +26,7 @@
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include "nouveau_vm.h"
#include "nouveau_ramht.h"

static void nv84_crypt_isr(struct drm_device *);

@@ -84,6 +85,28 @@ nv84_crypt_destroy_context(struct nouveau_channel *chan)
	atomic_dec(&chan->vm->pcrypt_refs);
}

int
nv84_crypt_object_new(struct nouveau_channel *chan, u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 5;
	obj->class = class;

	nv_wo32(obj, 0x00, class);
	dev_priv->engine.instmem.flush(dev);

	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}

void
nv84_crypt_tlb_flush(struct drm_device *dev)
{
@@ -270,6 +270,12 @@ nvc0_graph_unload_context(struct drm_device *dev)
	return nvc0_graph_unload_context_to(dev, inst);
}

int
nvc0_graph_object_new(struct nouveau_channel *chan, u32 handle, u16 class)
{
	return 0;
}

static void
nvc0_graph_destroy(struct drm_device *dev)
{