From: Liam Howlett <liam.howlett@oracle.com>
To: "linux-mm@kvack.org" <linux-mm@kvack.org>,
"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
Andrew Morton <akpm@linux-foundation.org>,
"maple-tree@lists.infradead.org" <maple-tree@lists.infradead.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Subject: [PATCH v3 16/48] userfaultfd: Use vma iterator
Date: Tue, 17 Jan 2023 02:34:15 +0000 [thread overview]
Message-ID: <20230117023335.1690727-17-Liam.Howlett@oracle.com> (raw)
In-Reply-To: <20230117023335.1690727-1-Liam.Howlett@oracle.com>
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Use the vma iterator so that the iterator can be invalidated or updated
to avoid each caller doing so.
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
fs/userfaultfd.c | 87 ++++++++++++++++++------------------------------
1 file changed, 33 insertions(+), 54 deletions(-)
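
[Note, not part of the patch: a minimal sketch of the iteration pattern this
conversion moves to, assuming the vma iterator helpers expanded earlier in
this series (VMA_ITERATOR, for_each_vma_range) and shown only to illustrate
how a range of VMAs is walked once the maple state is no longer managed by
the caller. The function and its locking choice are hypothetical.]

#include <linux/mm.h>

/*
 * Sketch only: walk every VMA overlapping [start, end) with the vma
 * iterator instead of an open-coded MA_STATE/mas_for_each() loop.
 * The iterator state lives in 'vmi'; helpers that modify the tree can
 * update or invalidate it internally, so the caller no longer needs to
 * call mas_pause() after each modification.
 */
static void sketch_walk_range(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, start);

	mmap_read_lock(mm);
	for_each_vma_range(vmi, vma, end)
		pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
	mmap_read_unlock(mm);
}

The same pattern underlies the for_each_vma_range() loops in the register
and unregister paths below.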
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 98ac37e34e3d..8aba0afb9b43 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -857,7 +857,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
/* len == 0 means wake all */
struct userfaultfd_wake_range range = { .len = 0, };
unsigned long new_flags;
- MA_STATE(mas, &mm->mm_mt, 0, 0);
+ VMA_ITERATOR(vmi, mm, 0);
WRITE_ONCE(ctx->released, true);
@@ -874,7 +874,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
*/
mmap_write_lock(mm);
prev = NULL;
- mas_for_each(&mas, vma, ULONG_MAX) {
+ for_each_vma(vmi, vma) {
cond_resched();
BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
!!(vma->vm_flags & __VM_UFFD_FLAGS));
@@ -883,13 +883,12 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
continue;
}
new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
- prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+ prev = vmi_vma_merge(&vmi, mm, prev, vma->vm_start, vma->vm_end,
new_flags, vma->anon_vma,
vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
NULL_VM_UFFD_CTX, anon_vma_name(vma));
if (prev) {
- mas_pause(&mas);
vma = prev;
} else {
prev = vma;
@@ -1276,7 +1275,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
bool found;
bool basic_ioctls;
unsigned long start, end, vma_end;
- MA_STATE(mas, &mm->mm_mt, 0, 0);
+ struct vma_iterator vmi;
user_uffdio_register = (struct uffdio_register __user *) arg;
@@ -1318,17 +1317,13 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
if (!mmget_not_zero(mm))
goto out;
+ ret = -EINVAL;
mmap_write_lock(mm);
- mas_set(&mas, start);
- vma = mas_find(&mas, ULONG_MAX);
+ vma_iter_init(&vmi, mm, start);
+ vma = vma_find(&vmi, end);
if (!vma)
goto out_unlock;
- /* check that there's at least one vma in the range */
- ret = -EINVAL;
- if (vma->vm_start >= end)
- goto out_unlock;
-
/*
* If the first vma contains huge pages, make sure start address
* is aligned to huge page size.
@@ -1345,7 +1340,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
*/
found = false;
basic_ioctls = false;
- for (cur = vma; cur; cur = mas_next(&mas, end - 1)) {
+ cur = vma;
+ do {
cond_resched();
BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
@@ -1402,16 +1398,14 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
basic_ioctls = true;
found = true;
- }
+ } for_each_vma_range(vmi, cur, end);
BUG_ON(!found);
- mas_set(&mas, start);
- prev = mas_prev(&mas, 0);
- if (prev != vma)
- mas_next(&mas, ULONG_MAX);
+ vma_iter_set(&vmi, start);
+ prev = vma_prev(&vmi);
ret = 0;
- do {
+ for_each_vma_range(vmi, vma, end) {
cond_resched();
BUG_ON(!vma_can_userfault(vma, vm_flags));
@@ -1432,30 +1426,25 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
vma_end = min(end, vma->vm_end);
new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
- prev = vma_merge(mm, prev, start, vma_end, new_flags,
+ prev = vmi_vma_merge(&vmi, mm, prev, start, vma_end, new_flags,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
((struct vm_userfaultfd_ctx){ ctx }),
anon_vma_name(vma));
if (prev) {
/* vma_merge() invalidated the mas */
- mas_pause(&mas);
vma = prev;
goto next;
}
if (vma->vm_start < start) {
- ret = split_vma(mm, vma, start, 1);
+ ret = vmi_split_vma(&vmi, mm, vma, start, 1);
if (ret)
break;
- /* split_vma() invalidated the mas */
- mas_pause(&mas);
}
if (vma->vm_end > end) {
- ret = split_vma(mm, vma, end, 0);
+ ret = vmi_split_vma(&vmi, mm, vma, end, 0);
if (ret)
break;
- /* split_vma() invalidated the mas */
- mas_pause(&mas);
}
next:
/*
@@ -1472,8 +1461,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
skip:
prev = vma;
start = vma->vm_end;
- vma = mas_next(&mas, end - 1);
- } while (vma);
+ }
+
out_unlock:
mmap_write_unlock(mm);
mmput(mm);
@@ -1517,7 +1506,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
bool found;
unsigned long start, end, vma_end;
const void __user *buf = (void __user *)arg;
- MA_STATE(mas, &mm->mm_mt, 0, 0);
+ struct vma_iterator vmi;
ret = -EFAULT;
if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
@@ -1536,14 +1525,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
goto out;
mmap_write_lock(mm);
- mas_set(&mas, start);
- vma = mas_find(&mas, ULONG_MAX);
- if (!vma)
- goto out_unlock;
-
- /* check that there's at least one vma in the range */
ret = -EINVAL;
- if (vma->vm_start >= end)
+ vma_iter_init(&vmi, mm, start);
+ vma = vma_find(&vmi, end);
+ if (!vma)
goto out_unlock;
/*
@@ -1561,8 +1546,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
* Search for not compatible vmas.
*/
found = false;
- ret = -EINVAL;
- for (cur = vma; cur; cur = mas_next(&mas, end - 1)) {
+ cur = vma;
+ do {
cond_resched();
BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
@@ -1579,16 +1564,13 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
goto out_unlock;
found = true;
- }
+ } for_each_vma_range(vmi, cur, end);
BUG_ON(!found);
- mas_set(&mas, start);
- prev = mas_prev(&mas, 0);
- if (prev != vma)
- mas_next(&mas, ULONG_MAX);
-
+ vma_iter_set(&vmi, start);
+ prev = vma_prev(&vmi);
ret = 0;
- do {
+ for_each_vma_range(vmi, vma, end) {
cond_resched();
BUG_ON(!vma_can_userfault(vma, vma->vm_flags));
@@ -1624,26 +1606,23 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
uffd_wp_range(mm, vma, start, vma_end - start, false);
new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
- prev = vma_merge(mm, prev, start, vma_end, new_flags,
+ prev = vmi_vma_merge(&vmi, mm, prev, start, vma_end, new_flags,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
NULL_VM_UFFD_CTX, anon_vma_name(vma));
if (prev) {
vma = prev;
- mas_pause(&mas);
goto next;
}
if (vma->vm_start < start) {
- ret = split_vma(mm, vma, start, 1);
+ ret = vmi_split_vma(&vmi, mm, vma, start, 1);
if (ret)
break;
- mas_pause(&mas);
}
if (vma->vm_end > end) {
- ret = split_vma(mm, vma, end, 0);
+ ret = vmi_split_vma(&vmi, mm, vma, end, 0);
if (ret)
break;
- mas_pause(&mas);
}
next:
/*
@@ -1657,8 +1636,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
skip:
prev = vma;
start = vma->vm_end;
- vma = mas_next(&mas, end - 1);
- } while (vma);
+ }
+
out_unlock:
mmap_write_unlock(mm);
mmput(mm);
--
2.35.1