nouveau: use mmu_notifier directly for invalidate_range_start
There is no reason to get the invalidate_range_start() callback via an indirection through hmm_mirror, just register a normal notifier directly.

Link: https://lore.kernel.org/r/20191112202231.3856-9-jgg@ziepe.ca
Tested-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit c625c274ee
parent 3506ff69c3
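
For orientation before the diff, here is the shape of the pattern the commit adopts, reduced to a minimal sketch against the mmu_notifier API of this era (mmap_sem, __mmu_notifier_register(), mmu_notifier_put()). All my_* names are invented for illustration; only the structure and locking mirror what the diff below does to nouveau_svmm:

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* Hypothetical driver context standing in for struct nouveau_svmm. */
struct my_svmm {
	struct mmu_notifier notifier;	/* embedded, as nouveau now does */
	struct mutex mutex;
	void *vmm;			/* NULL once torn down */
};

static int my_invalidate_range_start(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *update)
{
	struct my_svmm *svmm = container_of(mn, struct my_svmm, notifier);

	mutex_lock(&svmm->mutex);
	if (svmm->vmm) {
		/* invalidate device mappings in [update->start, update->end) */
	}
	mutex_unlock(&svmm->mutex);
	return 0;
}

/* Runs only after the last mmu_notifier_put() reference drops and the
 * notifier core is done with the registration, so no callback can still
 * be executing when the memory is freed. */
static void my_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct my_svmm, notifier));
}

static const struct mmu_notifier_ops my_mn_ops = {
	.invalidate_range_start = my_invalidate_range_start,
	.free_notifier = my_free_notifier,
};

/* Caller already holds current->mm->mmap_sem for write, hence the
 * __mmu_notifier_register() variant rather than mmu_notifier_register(). */
static int my_svmm_init(struct my_svmm *svmm)
{
	svmm->notifier.ops = &my_mn_ops;
	return __mmu_notifier_register(&svmm->notifier, current->mm);
}

/* Teardown: detach under the lock so a racing callback sees NULL, then
 * drop the reference; ownership of svmm has passed to the notifier. */
static void my_svmm_fini(struct my_svmm *svmm)
{
	mutex_lock(&svmm->mutex);
	svmm->vmm = NULL;
	mutex_unlock(&svmm->mutex);
	mmu_notifier_put(&svmm->notifier);
}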
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -88,6 +88,7 @@ nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
 }
 
 struct nouveau_svmm {
+	struct mmu_notifier notifier;
 	struct nouveau_vmm *vmm;
 	struct {
 		unsigned long start;
@@ -96,7 +97,6 @@ struct nouveau_svmm {
 
 	struct mutex mutex;
 
-	struct mm_struct *mm;
 	struct hmm_mirror mirror;
 };
 
@@ -251,10 +251,11 @@ nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
 }
 
 static int
-nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
-					const struct mmu_notifier_range *update)
+nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
+				    const struct mmu_notifier_range *update)
 {
-	struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
+	struct nouveau_svmm *svmm =
+		container_of(mn, struct nouveau_svmm, notifier);
 	unsigned long start = update->start;
 	unsigned long limit = update->end;
 
@@ -264,6 +265,9 @@ nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 	SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
 
 	mutex_lock(&svmm->mutex);
+	if (unlikely(!svmm->vmm))
+		goto out;
+
 	if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
 		if (start < svmm->unmanaged.start) {
 			nouveau_svmm_invalidate(svmm, start,
@@ -273,19 +277,31 @@ nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 	}
 
 	nouveau_svmm_invalidate(svmm, start, limit);
+
+out:
 	mutex_unlock(&svmm->mutex);
 	return 0;
 }
 
-static void
-nouveau_svmm_release(struct hmm_mirror *mirror)
+static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
 {
+	kfree(container_of(mn, struct nouveau_svmm, notifier));
 }
 
-static const struct hmm_mirror_ops
-nouveau_svmm = {
+static const struct mmu_notifier_ops nouveau_mn_ops = {
+	.invalidate_range_start = nouveau_svmm_invalidate_range_start,
+	.free_notifier = nouveau_svmm_free_notifier,
+};
+
+static int
+nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
+					const struct mmu_notifier_range *update)
+{
+	return 0;
+}
+
+static const struct hmm_mirror_ops nouveau_svmm = {
 	.sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables,
-	.release = nouveau_svmm_release,
 };
 
 void
@@ -294,7 +310,10 @@ nouveau_svmm_fini(struct nouveau_svmm **psvmm)
 	struct nouveau_svmm *svmm = *psvmm;
 	if (svmm) {
 		hmm_mirror_unregister(&svmm->mirror);
-		kfree(*psvmm);
+		mutex_lock(&svmm->mutex);
+		svmm->vmm = NULL;
+		mutex_unlock(&svmm->mutex);
+		mmu_notifier_put(&svmm->notifier);
 		*psvmm = NULL;
 	}
 }
@@ -320,7 +339,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
 	mutex_lock(&cli->mutex);
 	if (cli->svm.cli) {
 		ret = -EBUSY;
-		goto done;
+		goto out_free;
 	}
 
 	/* Allocate a new GPU VMM that can support SVM (managed by the
@@ -335,24 +354,33 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
 			.fault_replay = true,
 		}, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
 	if (ret)
-		goto done;
+		goto out_free;
 
-	/* Enable HMM mirroring of CPU address-space to VMM. */
-	svmm->mm = get_task_mm(current);
-	down_write(&svmm->mm->mmap_sem);
+	down_write(&current->mm->mmap_sem);
 	svmm->mirror.ops = &nouveau_svmm;
-	ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
-	if (ret == 0) {
-		cli->svm.svmm = svmm;
-		cli->svm.cli = cli;
-	}
-	up_write(&svmm->mm->mmap_sem);
-	mmput(svmm->mm);
+	ret = hmm_mirror_register(&svmm->mirror, current->mm);
+	if (ret)
+		goto out_mm_unlock;
 
-done:
-	if (ret)
-		nouveau_svmm_fini(&svmm);
+	svmm->notifier.ops = &nouveau_mn_ops;
+	ret = __mmu_notifier_register(&svmm->notifier, current->mm);
+	if (ret)
+		goto out_hmm_unregister;
+	/* Note, ownership of svmm transfers to mmu_notifier */
+
+	cli->svm.svmm = svmm;
+	cli->svm.cli = cli;
+	up_write(&current->mm->mmap_sem);
 	mutex_unlock(&cli->mutex);
+	return 0;
+
+out_hmm_unregister:
+	hmm_mirror_unregister(&svmm->mirror);
+out_mm_unlock:
+	up_write(&current->mm->mmap_sem);
+out_free:
+	mutex_unlock(&cli->mutex);
+	kfree(svmm);
 	return ret;
 }
 
@@ -494,12 +522,12 @@ nouveau_range_fault(struct nouveau_svmm *svmm, struct hmm_range *range)
 
 	ret = hmm_range_register(range, &svmm->mirror);
 	if (ret) {
-		up_read(&svmm->mm->mmap_sem);
+		up_read(&svmm->notifier.mm->mmap_sem);
 		return (int)ret;
 	}
 
 	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
-		up_read(&svmm->mm->mmap_sem);
+		up_read(&svmm->notifier.mm->mmap_sem);
 		return -EBUSY;
 	}
 
@@ -507,7 +535,7 @@ nouveau_range_fault(struct nouveau_svmm *svmm, struct hmm_range *range)
 	if (ret <= 0) {
 		if (ret == 0)
 			ret = -EBUSY;
-		up_read(&svmm->mm->mmap_sem);
+		up_read(&svmm->notifier.mm->mmap_sem);
 		hmm_range_unregister(range);
 		return ret;
 	}
@@ -587,12 +615,15 @@ nouveau_svm_fault(struct nvif_notify *notify)
 	args.i.p.version = 0;
 
 	for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
+		struct mm_struct *mm;
+
 		/* Cancel any faults from non-SVM channels. */
 		if (!(svmm = buffer->fault[fi]->svmm)) {
 			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
 			continue;
 		}
 		SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
+		mm = svmm->notifier.mm;
 
 		/* We try and group handling of faults within a small
 		 * window into a single update.
@@ -609,11 +640,11 @@ nouveau_svm_fault(struct nvif_notify *notify)
 		/* Intersect fault window with the CPU VMA, cancelling
 		 * the fault if the address is invalid.
 		 */
-		down_read(&svmm->mm->mmap_sem);
-		vma = find_vma_intersection(svmm->mm, start, limit);
+		down_read(&mm->mmap_sem);
+		vma = find_vma_intersection(mm, start, limit);
 		if (!vma) {
 			SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
-			up_read(&svmm->mm->mmap_sem);
+			up_read(&mm->mmap_sem);
 			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
 			continue;
 		}
@@ -623,7 +654,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
 
 		if (buffer->fault[fi]->addr != start) {
 			SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
-			up_read(&svmm->mm->mmap_sem);
+			up_read(&mm->mmap_sem);
 			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
 			continue;
 		}
@@ -704,7 +735,7 @@ again:
 					 NULL);
 		svmm->vmm->vmm.object.client->super = false;
 		mutex_unlock(&svmm->mutex);
-		up_read(&svmm->mm->mmap_sem);
+		up_read(&mm->mmap_sem);
 	}
 
 	/* Cancel any faults in the window whose pages didn't manage
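
Two details of the diff are worth calling out. First, once __mmu_notifier_register() succeeds, ownership of svmm transfers to the mmu_notifier subsystem: teardown in nouveau_svmm_fini() ends with mmu_notifier_put() rather than kfree(), and the actual free happens in nouveau_svmm_free_notifier() only after the notifier core is finished with the registration. Second, because the notifier can now outlive the GPU VMM, nouveau_svmm_invalidate_range_start() rechecks svmm->vmm under svmm->mutex and bails out early if fini has already detached it.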