From: Jason Gunthorpe <jgg@ziepe.ca>
To: Jerome Glisse <jglisse@redhat.com>,
Ralph Campbell <rcampbell@nvidia.com>,
John Hubbard <jhubbard@nvidia.com>,
Felix.Kuehling@amd.com
Cc: linux-rdma@vger.kernel.org, linux-mm@kvack.org,
Andrea Arcangeli <aarcange@redhat.com>,
dri-devel@lists.freedesktop.org, amd-gfx@lists.freedesktop.org,
Ben Skeggs <bskeggs@redhat.com>,
Jason Gunthorpe <jgg@mellanox.com>,
nouveau@lists.freedesktop.org
Subject: [PATCH hmm 10/15] nouveau: use mmu_notifier directly for invalidate_range_start
Date: Tue, 15 Oct 2019 15:12:37 -0300
Message-ID: <20191015181242.8343-11-jgg@ziepe.ca>
In-Reply-To: <20191015181242.8343-1-jgg@ziepe.ca>

From: Jason Gunthorpe <jgg@mellanox.com>

There is no reason to get the invalidate_range_start() callback via an
indirection through hmm_mirror; just register a normal notifier directly.
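
For reviewers less familiar with the mmu_notifier lifetime API this leans
on, the ownership pattern is roughly the following (a condensed sketch of
the flow in the diff below; the hmm_mirror registration and the error
unwind paths are omitted):

    /* Registration: mmap_sem must be held for write. On success,
     * responsibility for the final free of svmm transfers to the
     * notifier core.
     */
    svmm->notifier.ops = &nouveau_mn_ops;
    ret = __mmu_notifier_register(&svmm->notifier, current->mm);
    if (ret)
        kfree(svmm);   /* registration failed, svmm is still ours */

    /* Teardown: drop the reference. The kfree() happens later, in
     * nouveau_svmm_free_notifier() via the .free_notifier op, once
     * the SRCU grace period has elapsed.
     */
    mmu_notifier_put(&svmm->notifier);
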
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: dri-devel@lists.freedesktop.org
Cc: nouveau@lists.freedesktop.org
Cc: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
---
drivers/gpu/drm/nouveau/nouveau_svm.c | 95 ++++++++++++++++++---------
1 file changed, 63 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 668d4bd0c118f1..577f8811925a59 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -88,6 +88,7 @@ nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
}
struct nouveau_svmm {
+ struct mmu_notifier notifier;
struct nouveau_vmm *vmm;
struct {
unsigned long start;
@@ -96,7 +97,6 @@ struct nouveau_svmm {
struct mutex mutex;
- struct mm_struct *mm;
struct hmm_mirror mirror;
};
@@ -251,10 +251,11 @@ nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
}
static int
-nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
- const struct mmu_notifier_range *update)
+nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
+ const struct mmu_notifier_range *update)
{
- struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
+ struct nouveau_svmm *svmm =
+ container_of(mn, struct nouveau_svmm, notifier);
unsigned long start = update->start;
unsigned long limit = update->end;
@@ -264,6 +265,9 @@ nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
mutex_lock(&svmm->mutex);
+ if (unlikely(!svmm->vmm))
+ goto out;
+
if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
if (start < svmm->unmanaged.start) {
nouveau_svmm_invalidate(svmm, start,
@@ -273,19 +277,31 @@ nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
}
nouveau_svmm_invalidate(svmm, start, limit);
+
+out:
mutex_unlock(&svmm->mutex);
return 0;
}
-static void
-nouveau_svmm_release(struct hmm_mirror *mirror)
+static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
+{
+ kfree(container_of(mn, struct nouveau_svmm, notifier));
+}
+
+static const struct mmu_notifier_ops nouveau_mn_ops = {
+ .invalidate_range_start = nouveau_svmm_invalidate_range_start,
+ .free_notifier = nouveau_svmm_free_notifier,
+};
+
+static int
+nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
+ const struct mmu_notifier_range *update)
{
+ return 0;
}
-static const struct hmm_mirror_ops
-nouveau_svmm = {
+static const struct hmm_mirror_ops nouveau_svmm = {
.sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables,
- .release = nouveau_svmm_release,
};
void
@@ -294,7 +310,10 @@ nouveau_svmm_fini(struct nouveau_svmm **psvmm)
struct nouveau_svmm *svmm = *psvmm;
if (svmm) {
hmm_mirror_unregister(&svmm->mirror);
- kfree(*psvmm);
+ mutex_lock(&svmm->mutex);
+ svmm->vmm = NULL;
+ mutex_unlock(&svmm->mutex);
+ mmu_notifier_put(&svmm->notifier);
*psvmm = NULL;
}
}
@@ -320,7 +339,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
mutex_lock(&cli->mutex);
if (cli->svm.cli) {
ret = -EBUSY;
- goto done;
+ goto out_free;
}
/* Allocate a new GPU VMM that can support SVM (managed by the
@@ -335,24 +354,33 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
.fault_replay = true,
}, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
if (ret)
- goto done;
+ goto out_free;
- /* Enable HMM mirroring of CPU address-space to VMM. */
- svmm->mm = get_task_mm(current);
- down_write(&svmm->mm->mmap_sem);
+ down_write(&current->mm->mmap_sem);
svmm->mirror.ops = &nouveau_svmm;
- ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
- if (ret == 0) {
- cli->svm.svmm = svmm;
- cli->svm.cli = cli;
- }
- up_write(&svmm->mm->mmap_sem);
- mmput(svmm->mm);
+ ret = hmm_mirror_register(&svmm->mirror, current->mm);
+ if (ret)
+ goto out_mm_unlock;
-done:
+ svmm->notifier.ops = &nouveau_mn_ops;
+ ret = __mmu_notifier_register(&svmm->notifier, current->mm);
if (ret)
- nouveau_svmm_fini(&svmm);
+ goto out_hmm_unregister;
+ /* Note, ownership of svmm transfers to mmu_notifier */
+
+ cli->svm.svmm = svmm;
+ cli->svm.cli = cli;
+ up_write(&current->mm->mmap_sem);
mutex_unlock(&cli->mutex);
+ return 0;
+
+out_hmm_unregister:
+ hmm_mirror_unregister(&svmm->mirror);
+out_mm_unlock:
+ up_write(&current->mm->mmap_sem);
+out_free:
+ mutex_unlock(&cli->mutex);
+ kfree(svmm);
return ret;
}
@@ -494,12 +522,12 @@ nouveau_range_fault(struct nouveau_svmm *svmm, struct hmm_range *range)
ret = hmm_range_register(range, &svmm->mirror);
if (ret) {
- up_read(&svmm->mm->mmap_sem);
+ up_read(&svmm->notifier.mm->mmap_sem);
return (int)ret;
}
if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
- up_read(&svmm->mm->mmap_sem);
+ up_read(&svmm->notifier.mm->mmap_sem);
return -EBUSY;
}
@@ -507,7 +535,7 @@ nouveau_range_fault(struct nouveau_svmm *svmm, struct hmm_range *range)
if (ret <= 0) {
if (ret == 0)
ret = -EBUSY;
- up_read(&svmm->mm->mmap_sem);
+ up_read(&svmm->notifier.mm->mmap_sem);
hmm_range_unregister(range);
return ret;
}
@@ -587,12 +615,15 @@ nouveau_svm_fault(struct nvif_notify *notify)
args.i.p.version = 0;
for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
+ struct mm_struct *mm;
+
/* Cancel any faults from non-SVM channels. */
if (!(svmm = buffer->fault[fi]->svmm)) {
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
+ mm = svmm->notifier.mm;
/* We try and group handling of faults within a small
* window into a single update.
@@ -609,11 +640,11 @@ nouveau_svm_fault(struct nvif_notify *notify)
/* Intersect fault window with the CPU VMA, cancelling
* the fault if the address is invalid.
*/
- down_read(&svmm->mm->mmap_sem);
- vma = find_vma_intersection(svmm->mm, start, limit);
+ down_read(&mm->mmap_sem);
+ vma = find_vma_intersection(mm, start, limit);
if (!vma) {
SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
- up_read(&svmm->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
@@ -623,7 +654,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
if (buffer->fault[fi]->addr != start) {
SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
- up_read(&svmm->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
@@ -704,7 +735,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
NULL);
svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
- up_read(&svmm->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
}
/* Cancel any faults in the window whose pages didn't manage
--
2.23.0