Merge tag 'drm-misc-next-fixes-2023-08-24' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

Short summary of fixes pull:

 * gpuva: Cleanups
 * kunit: Documentation fixes
 * nouveau:
   * UAPI: Avoid implicit NO_PREFETCH flag
   * Scheduler fixes
   * Fix remap
 * ttm: Fix type conversion in tests

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20230824181241.GA6386@linux-uq9g.hotspot.internet-for-guests.com
commit bc609f4867
drivers/gpu/drm/drm_gpuva_mgr.c

@@ -1076,7 +1076,7 @@ __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
                   u64 req_addr, u64 req_range,
                   struct drm_gem_object *req_obj, u64 req_offset)
 {
-        struct drm_gpuva *va, *next, *prev = NULL;
+        struct drm_gpuva *va, *next;
         u64 req_end = req_addr + req_range;
         int ret;
 
@@ -1106,7 +1106,7 @@ __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
                         ret = op_unmap_cb(ops, priv, va, merge);
                         if (ret)
                                 return ret;
-                        goto next;
+                        continue;
                 }
 
                 if (end > req_end) {
@@ -1151,7 +1151,7 @@ __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
                                 ret = op_remap_cb(ops, priv, &p, NULL, &u);
                                 if (ret)
                                         return ret;
-                                goto next;
+                                continue;
                         }
 
                         if (end > req_end) {
@@ -1184,7 +1184,7 @@ __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
                         ret = op_unmap_cb(ops, priv, va, merge);
                         if (ret)
                                 return ret;
-                        goto next;
+                        continue;
                 }
 
                 if (end > req_end) {
@@ -1205,8 +1205,6 @@ __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
                                 break;
                         }
                 }
-next:
-                prev = va;
         }
 
         return op_map_cb(ops, priv,
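Editor's note: the `goto next` to `continue` swaps above work because the `next:` label's only job was to update the now-removed, never-read `prev` pointer before falling off the end of the loop body. A minimal standalone sketch of the same transformation (hypothetical data, not the kernel code):

#include <stdio.h>

int main(void)
{
        int items[] = { 1, -2, 3 };
        int i;

        /* Before the cleanup, the loop body ended in a label that only
         * fed an otherwise-unused `prev` variable:
         *
         *         if (items[i] < 0)
         *                 goto next;
         *         ...
         * next:
         *         prev = items[i];
         *
         * With the dead store gone, `goto next` is just `continue`. */
        for (i = 0; i < 3; i++) {
                if (items[i] < 0)
                        continue;
                printf("%d\n", items[i]);
        }
        return 0;
}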
drivers/gpu/drm/nouveau/nouveau_dma.c

@@ -69,16 +69,19 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 }
 
 void
-nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
+nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
+              bool no_prefetch)
 {
         struct nvif_user *user = &chan->drm->client.device.user;
         struct nouveau_bo *pb = chan->push.buffer;
         int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
 
         BUG_ON(chan->dma.ib_free < 1);
+        WARN_ON(length > NV50_DMA_PUSH_MAX_LENGTH);
 
         nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
-        nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
+        nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8 |
+                        (no_prefetch ? (1 << 31) : 0));
 
         chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
 
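Editor's note: each indirect-buffer slot nv50_dma_push() fills is two 32-bit words: the low half of the GPU address, then the high address bits packed with the length (shifted into bits 8..30) and, after this change, the no-prefetch flag in bit 31. The length living in bits 8..30 is also why NV50_DMA_PUSH_MAX_LENGTH below is 0x7fffff (23 bits). A standalone sketch of that packing, using plain C stand-ins for the kernel helpers:

#include <stdint.h>
#include <stdio.h>

#define NV50_DMA_PUSH_MAX_LENGTH 0x7fffff        /* length fits in bits 8..30 */

/* Pack one IB entry the way nv50_dma_push() now does: word 0 holds the
 * low 32 address bits; word 1 holds the high address bits, the length
 * shifted up by 8, and the no-prefetch flag in bit 31. */
static void pack_ib_entry(uint64_t offset, uint32_t length,
                          int no_prefetch, uint32_t word[2])
{
        word[0] = (uint32_t)(offset & 0xffffffff);
        word[1] = (uint32_t)(offset >> 32) | (length << 8) |
                  (no_prefetch ? (1u << 31) : 0);
}

int main(void)
{
        uint32_t w[2];

        pack_ib_entry(0xab12345678ull, 0x1000, 1, w);
        printf("%08x %08x\n", w[0], w[1]);        /* 12345678 801000ab */
        return 0;
}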
drivers/gpu/drm/nouveau/nouveau_dma.h

@@ -31,7 +31,8 @@
 #include "nouveau_chan.h"
 
 int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
+void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length,
+                   bool no_prefetch);
 
 /*
  * There's a hw race condition where you can't jump to your PUT offset,
@@ -45,6 +46,9 @@ void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
  */
 #define NOUVEAU_DMA_SKIPS (128 / 4)
 
+/* Maximum push buffer size. */
+#define NV50_DMA_PUSH_MAX_LENGTH 0x7fffff
+
 /* Object handles - for stuff that's doesn't use handle == oclass. */
 enum {
         NvDmaFB = 0x80000002,
@@ -89,7 +93,7 @@ FIRE_RING(struct nouveau_channel *chan)
 
         if (chan->dma.ib_max) {
                 nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2),
-                              (chan->dma.cur - chan->dma.put) << 2);
+                              (chan->dma.cur - chan->dma.put) << 2, false);
         } else {
                 WRITE_PUT(chan->dma.cur);
         }
drivers/gpu/drm/nouveau/nouveau_exec.c

@@ -164,8 +164,10 @@ nouveau_exec_job_run(struct nouveau_job *job)
         }
 
         for (i = 0; i < exec_job->push.count; i++) {
-                nv50_dma_push(chan, exec_job->push.s[i].va,
-                              exec_job->push.s[i].va_len);
+                struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
+                bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;
+
+                nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
         }
 
         ret = nouveau_fence_emit(fence, chan);
@@ -223,7 +225,18 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob,
 {
         struct nouveau_exec_job *job;
         struct nouveau_job_args args = {};
-        int ret;
+        int i, ret;
+
+        for (i = 0; i < __args->push.count; i++) {
+                struct drm_nouveau_exec_push *p = &__args->push.s[i];
+
+                if (unlikely(p->va_len > NV50_DMA_PUSH_MAX_LENGTH)) {
+                        NV_PRINTK(err, nouveau_cli(__args->file_priv),
+                                  "pushbuf size exceeds limit: 0x%x max 0x%x\n",
+                                  p->va_len, NV50_DMA_PUSH_MAX_LENGTH);
+                        return -EINVAL;
+                }
+        }
 
         job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
         if (!job)
drivers/gpu/drm/nouveau/nouveau_gem.c

@@ -856,9 +856,11 @@ revalidate:
                 for (i = 0; i < req->nr_push; i++) {
                         struct nouveau_vma *vma = (void *)(unsigned long)
                                 bo[push[i].bo_index].user_priv;
+                        u64 addr = vma->addr + push[i].offset;
+                        u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
+                        bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
 
-                        nv50_dma_push(chan, vma->addr + push[i].offset,
-                                      push[i].length);
+                        nv50_dma_push(chan, addr, length, no_prefetch);
                 }
         } else
         if (drm->client.device.info.chipset >= 0x25) {
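Editor's note: on this legacy pushbuf path the new flag is carried in bit 23 of the existing 64-bit length field (NOUVEAU_GEM_PUSHBUF_NO_PREFETCH, defined in the uapi hunk further below), which is why the kernel masks it off before treating the remainder as a length. A small self-contained sketch of that encode/decode round trip:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)

int main(void)
{
        /* Userspace folds the flag into the push entry's length field... */
        uint64_t submitted = 0x1000 | NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;

        /* ...and the kernel splits it back apart, as in the hunk above. */
        uint32_t length = submitted & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
        bool no_prefetch = submitted & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;

        printf("length=0x%x no_prefetch=%d\n", length, no_prefetch);
        return 0;
}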
drivers/gpu/drm/nouveau/nouveau_sched.c

@@ -292,6 +292,28 @@ nouveau_job_submit(struct nouveau_job *job)
         if (job->sync)
                 done_fence = dma_fence_get(job->done_fence);
 
+        /* If a sched job depends on a dma-fence from a job from the same GPU
+         * scheduler instance, but a different scheduler entity, the GPU
+         * scheduler does only wait for the particular job to be scheduled,
+         * rather than for the job to fully complete. This is due to the GPU
+         * scheduler assuming that there is a scheduler instance per ring.
+         * However, the current implementation, in order to avoid arbitrary
+         * amounts of kthreads, has a single scheduler instance while scheduler
+         * entities represent rings.
+         *
+         * As a workaround, set the DRM_SCHED_FENCE_DONT_PIPELINE for all
+         * out-fences in order to force the scheduler to wait for full job
+         * completion for dependent jobs from different entities and same
+         * scheduler instance.
+         *
+         * There is some work in progress [1] to address the issues of firmware
+         * schedulers; once it is in-tree the scheduler topology in Nouveau
+         * should be re-worked accordingly.
+         *
+         * [1] https://lore.kernel.org/dri-devel/20230801205103.627779-1-matthew.brost@intel.com/
+         */
+        set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags);
+
         if (job->ops->armed_submit)
                 job->ops->armed_submit(job);
 
drivers/gpu/drm/nouveau/nouveau_uvmm.c

@@ -639,6 +639,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
                         struct drm_gpuva *va = r->unmap->va;
                         struct uvmm_map_args remap_args = {
                                 .kind = uvma_from_va(va)->kind,
+                                .region = uvma_from_va(va)->region,
                         };
                         u64 ustart = va->va.addr;
                         u64 urange = va->va.range;
drivers/gpu/drm/tests/drm_kunit_helpers.c

@@ -156,7 +156,7 @@ static void action_drm_release_context(void *ptr)
 }
 
 /**
- * drm_kunit_helper_context_alloc - Allocates an acquire context
+ * drm_kunit_helper_acquire_ctx_alloc - Allocates an acquire context
  * @test: The test context object
  *
  * Allocates and initializes a modeset acquire context.
drivers/gpu/drm/ttm/tests/ttm_pool_test.c

@@ -228,8 +228,8 @@ static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
         dma1 = tt->dma_address[0];
         dma2 = tt->dma_address[tt->num_pages - 1];
 
-        KUNIT_ASSERT_NOT_NULL(test, (void *)dma1);
-        KUNIT_ASSERT_NOT_NULL(test, (void *)dma2);
+        KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma1);
+        KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma2);
 
         ttm_pool_free(pool, tt);
         ttm_tt_fini(tt);
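Editor's note on the ttm fix: dma_addr_t can be 64 bits wide on a 32-bit build, so casting it straight to `void *` trips -Wint-to-pointer-cast; going through uintptr_t first makes the narrowing explicit. A tiny sketch of the pattern (hypothetical typedef; compile for a 32-bit target to see the warning):

#include <stdint.h>

typedef uint64_t dma_addr_t;        /* e.g. with CONFIG_ARCH_DMA_ADDR_T_64BIT */

void *cast_warns(dma_addr_t a)
{
        return (void *)a;                /* warns when pointers are 32-bit */
}

void *cast_clean(dma_addr_t a)
{
        return (void *)(uintptr_t)a;        /* explicit narrowing, no warning */
}

int main(void)
{
        return cast_clean(0x1234) == cast_warns(0x1234) ? 0 : 1;
}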
include/uapi/drm/nouveau_drm.h

@@ -138,6 +138,7 @@ struct drm_nouveau_gem_pushbuf_push {
         __u32 pad;
         __u64 offset;
         __u64 length;
+#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
 };
 
 struct drm_nouveau_gem_pushbuf {
@@ -338,7 +339,12 @@ struct drm_nouveau_exec_push {
         /**
          * @va_len: the length of the push buffer mapping
          */
-        __u64 va_len;
+        __u32 va_len;
+        /**
+         * @flags: the flags for this push buffer mapping
+         */
+        __u32 flags;
+#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
 };
 
 /**
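Editor's note: with the reworked exec UAPI, userspace states its intent explicitly through @flags rather than overloading the length. A minimal sketch of filling one push entry (the struct is restated locally for illustration; real code includes the uapi header above, whose @va member is used as p->va in nouveau_exec.c):

#include <stdint.h>
#include <string.h>

/* Local restatement for illustration; real code uses <drm/nouveau_drm.h>. */
struct drm_nouveau_exec_push {
        uint64_t va;        /* GPU VA of the push buffer mapping */
        uint32_t va_len;    /* length, now __u32 per the hunk above */
        uint32_t flags;
#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
};

int main(void)
{
        struct drm_nouveau_exec_push push;

        memset(&push, 0, sizeof(push));
        push.va = 0x100000;        /* hypothetical mapping address */
        push.va_len = 0x1000;      /* must not exceed NV50_DMA_PUSH_MAX_LENGTH */
        push.flags = DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;

        return 0;
}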