From: Liam Howlett <liam.howlett@oracle.com>
To: "linux-mm@kvack.org" <linux-mm@kvack.org>,
"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
Andrew Morton <akpm@linux-foundation.org>,
"maple-tree@lists.infradead.org" <maple-tree@lists.infradead.org>
Cc: Liam Howlett <liam.howlett@oracle.com>,
Liam Howlett <liam.howlett@oracle.com>
Subject: [PATCH v3 40/48] mm/mmap: Refactor locking out of __vma_adjust()
Date: Tue, 17 Jan 2023 02:34:22 +0000 [thread overview]
Message-ID: <20230117023335.1690727-41-Liam.Howlett@oracle.com> (raw)
In-Reply-To: <20230117023335.1690727-1-Liam.Howlett@oracle.com>
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Move the locking into vma_prepare() and vma_complete() for use elsewhere.
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
mm/internal.h | 14 +++
mm/mmap.c | 231 +++++++++++++++++++++++++++++---------------------
2 files changed, 150 insertions(+), 95 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 1c3eb70b7a7c..eccfc1ce1f49 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -913,4 +913,18 @@ static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
return 0;
}
+
+/*
+ * VMA lock generalization
+ */
+struct vma_prepare {
+ struct vm_area_struct *vma;
+ struct vm_area_struct *adj_next;
+ struct file *file;
+ struct address_space *mapping;
+ struct anon_vma *anon_vma;
+ struct vm_area_struct *insert;
+ struct vm_area_struct *remove;
+ struct vm_area_struct *remove2;
+};
#endif /* __MM_INTERNAL_H */
diff --git a/mm/mmap.c b/mm/mmap.c
index 90638fa48734..786d8181089f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -576,6 +576,127 @@ inline int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
return -ENOMEM;
}
+/*
+ * vma_prepare() - Helper function for handling locking VMAs prior to altering them
+ * @vp: The initialized vma_prepare struct
+ */
+static inline void vma_prepare(struct vma_prepare *vp)
+{
+ if (vp->file) {
+ uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
+
+ if (vp->adj_next)
+ uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
+ vp->adj_next->vm_end);
+
+ i_mmap_lock_write(vp->mapping);
+ if (vp->insert && vp->insert->vm_file) {
+ /*
+ * Put into interval tree now, so instantiated pages
+ * are visible to arm/parisc __flush_dcache_page
+ * throughout; but we cannot insert into address
+ * space until vma start or end is updated.
+ */
+ __vma_link_file(vp->insert,
+ vp->insert->vm_file->f_mapping);
+ }
+ }
+
+ if (vp->anon_vma) {
+ anon_vma_lock_write(vp->anon_vma);
+ anon_vma_interval_tree_pre_update_vma(vp->vma);
+ if (vp->adj_next)
+ anon_vma_interval_tree_pre_update_vma(vp->adj_next);
+ }
+
+ if (vp->file) {
+ flush_dcache_mmap_lock(vp->mapping);
+ vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
+ if (vp->adj_next)
+ vma_interval_tree_remove(vp->adj_next,
+ &vp->mapping->i_mmap);
+ }
+
+}
+
+/*
+ * vma_complete() - Helper function for handling the unlocking after altering VMAs,
+ * or for inserting a VMA.
+ *
+ * @vp: The vma_prepare struct
+ * @vmi: The vma iterator
+ * @mm: The mm_struct
+ */
+static inline void vma_complete(struct vma_prepare *vp,
+ struct vma_iterator *vmi, struct mm_struct *mm)
+{
+ if (vp->file) {
+ if (vp->adj_next)
+ vma_interval_tree_insert(vp->adj_next,
+ &vp->mapping->i_mmap);
+ vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
+ flush_dcache_mmap_unlock(vp->mapping);
+ }
+
+ if (vp->remove && vp->file) {
+ __remove_shared_vm_struct(vp->remove, vp->file, vp->mapping);
+ if (vp->remove2)
+ __remove_shared_vm_struct(vp->remove2, vp->file,
+ vp->mapping);
+ } else if (vp->insert) {
+ /*
+ * split_vma has split insert from vma, and needs
+ * us to insert it before dropping the locks
+ * (it may either follow vma or precede it).
+ */
+ vma_iter_store(vmi, vp->insert);
+ mm->map_count++;
+ }
+
+ if (vp->anon_vma) {
+ anon_vma_interval_tree_post_update_vma(vp->vma);
+ if (vp->adj_next)
+ anon_vma_interval_tree_post_update_vma(vp->adj_next);
+ anon_vma_unlock_write(vp->anon_vma);
+ }
+
+ if (vp->file) {
+ i_mmap_unlock_write(vp->mapping);
+ uprobe_mmap(vp->vma);
+
+ if (vp->adj_next)
+ uprobe_mmap(vp->adj_next);
+ }
+
+ if (vp->remove) {
+again:
+ if (vp->file) {
+ uprobe_munmap(vp->remove, vp->remove->vm_start,
+ vp->remove->vm_end);
+ fput(vp->file);
+ }
+ if (vp->remove->anon_vma)
+ anon_vma_merge(vp->vma, vp->remove);
+ mm->map_count--;
+ mpol_put(vma_policy(vp->remove));
+ if (!vp->remove2)
+ WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
+ vm_area_free(vp->remove);
+
+ /*
+ * In mprotect's case 6 (see comments on vma_merge),
+ * we must remove next_next too.
+ */
+ if (vp->remove2) {
+ vp->remove = vp->remove2;
+ vp->remove2 = NULL;
+ goto again;
+ }
+ }
+ if (vp->insert && vp->file)
+ uprobe_mmap(vp->insert);
+}
+
/*
* We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
* is already present in an i_mmap tree without adjusting the tree.
@@ -591,14 +712,13 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct vm_area_struct *next_next = NULL; /* uninit var warning */
struct vm_area_struct *next = find_vma(mm, vma->vm_end);
struct vm_area_struct *orig_vma = vma;
- struct address_space *mapping = NULL;
- struct rb_root_cached *root = NULL;
struct anon_vma *anon_vma = NULL;
struct file *file = vma->vm_file;
bool vma_changed = false;
long adjust_next = 0;
int remove_next = 0;
struct vm_area_struct *exporter = NULL, *importer = NULL;
+ struct vma_prepare vma_prep;
if (next && !insert) {
if (end >= next->vm_end) {
@@ -694,39 +814,22 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
anon_vma != next->anon_vma);
vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
- if (file) {
- mapping = file->f_mapping;
- root = &mapping->i_mmap;
- uprobe_munmap(vma, vma->vm_start, vma->vm_end);
-
- if (adjust_next)
- uprobe_munmap(next, next->vm_start, next->vm_end);
-
- i_mmap_lock_write(mapping);
- if (insert && insert->vm_file) {
- /*
- * Put into interval tree now, so instantiated pages
- * are visible to arm/parisc __flush_dcache_page
- * throughout; but we cannot insert into address
- * space until vma start or end is updated.
- */
- __vma_link_file(insert, insert->vm_file->f_mapping);
- }
- }
- if (anon_vma) {
- anon_vma_lock_write(anon_vma);
- anon_vma_interval_tree_pre_update_vma(vma);
- if (adjust_next)
- anon_vma_interval_tree_pre_update_vma(next);
+ memset(&vma_prep, 0, sizeof(vma_prep));
+ vma_prep.vma = vma;
+ vma_prep.anon_vma = anon_vma;
+ vma_prep.file = file;
+ if (adjust_next)
+ vma_prep.adj_next = next;
+ if (file)
+ vma_prep.mapping = file->f_mapping;
+ vma_prep.insert = insert;
+ if (remove_next) {
+ vma_prep.remove = next;
+ vma_prep.remove2 = next_next;
}
- if (file) {
- flush_dcache_mmap_lock(mapping);
- vma_interval_tree_remove(vma, root);
- if (adjust_next)
- vma_interval_tree_remove(next, root);
- }
+ vma_prepare(&vma_prep);
if (start != vma->vm_start) {
if (vma->vm_start < start) {
@@ -764,69 +867,7 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
vma_iter_store(vmi, next);
}
- if (file) {
- if (adjust_next)
- vma_interval_tree_insert(next, root);
- vma_interval_tree_insert(vma, root);
- flush_dcache_mmap_unlock(mapping);
- }
-
- if (remove_next && file) {
- __remove_shared_vm_struct(next, file, mapping);
- if (remove_next == 2)
- __remove_shared_vm_struct(next_next, file, mapping);
- } else if (insert) {
- /*
- * split_vma has split insert from vma, and needs
- * us to insert it before dropping the locks
- * (it may either follow vma or precede it).
- */
- vma_iter_store(vmi, insert);
- mm->map_count++;
- }
-
- if (anon_vma) {
- anon_vma_interval_tree_post_update_vma(vma);
- if (adjust_next)
- anon_vma_interval_tree_post_update_vma(next);
- anon_vma_unlock_write(anon_vma);
- }
-
- if (file) {
- i_mmap_unlock_write(mapping);
- uprobe_mmap(vma);
-
- if (adjust_next)
- uprobe_mmap(next);
- }
-
- if (remove_next) {
-again:
- if (file) {
- uprobe_munmap(next, next->vm_start, next->vm_end);
- fput(file);
- }
- if (next->anon_vma)
- anon_vma_merge(vma, next);
- mm->map_count--;
- mpol_put(vma_policy(next));
- if (remove_next != 2)
- BUG_ON(vma->vm_end < next->vm_end);
- vm_area_free(next);
-
- /*
- * In mprotect's case 6 (see comments on vma_merge),
- * we must remove next_next too.
- */
- if (remove_next == 2) {
- remove_next = 1;
- next = next_next;
- goto again;
- }
- }
- if (insert && file)
- uprobe_mmap(insert);
-
+ vma_complete(&vma_prep, vmi, mm);
vma_iter_free(vmi);
validate_mm(mm);
--
2.35.1
next prev parent reply other threads:[~2023-01-17 2:35 UTC|newest]
Thread overview: 55+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-01-17 2:34 [PATCH v3 00/48] VMA tree type safety and remove __vma_adjust() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 01/48] maple_tree: Add mas_init() function Liam Howlett
2023-01-17 2:34 ` [PATCH v3 02/48] maple_tree: Fix potential rcu issue Liam Howlett
2023-01-17 2:34 ` [PATCH v3 04/48] test_maple_tree: Test modifications while iterating Liam Howlett
2023-01-17 2:34 ` [PATCH v3 03/48] maple_tree: Reduce user error potential Liam Howlett
2023-01-17 2:34 ` [PATCH v3 05/48] maple_tree: Fix handle of invalidated state in mas_wr_store_setup() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 07/48] mm: Expand vma iterator interface Liam Howlett
2023-01-17 2:34 ` [PATCH v3 06/48] maple_tree: Fix mas_prev() and mas_find() state handling Liam Howlett
2023-01-17 2:34 ` [PATCH v3 08/48] mm/mmap: convert brk to use vma iterator Liam Howlett
2023-01-17 2:34 ` [PATCH v3 09/48] kernel/fork: Convert forking to using the vmi iterator Liam Howlett
2023-01-17 2:34 ` [PATCH v3 11/48] mm/mmap: Remove preallocation from do_mas_align_munmap() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 12/48] mmap: Change do_mas_munmap and do_mas_aligned_munmap() to use vma iterator Liam Howlett
2023-01-17 2:34 ` [PATCH v3 10/48] mmap: Convert vma_link() " Liam Howlett
2023-01-17 2:34 ` [PATCH v3 13/48] mmap: Convert vma_expand() to use " Liam Howlett
2023-01-17 2:34 ` [PATCH v3 15/48] ipc/shm: Use the vma iterator for munmap calls Liam Howlett
2023-01-17 2:34 ` [PATCH v3 14/48] mm: Add temporary vma iterator versions of vma_merge(), split_vma(), and __split_vma() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 17/48] mm: Change mprotect_fixup to vma iterator Liam Howlett
2023-01-17 2:34 ` [PATCH v3 16/48] userfaultfd: Use " Liam Howlett
2023-01-17 2:34 ` [PATCH v3 18/48] mlock: Convert mlock to " Liam Howlett
2023-01-17 2:34 ` [PATCH v3 20/48] mempolicy: Convert " Liam Howlett
2023-01-17 2:34 ` [PATCH v3 19/48] coredump: " Liam Howlett
2023-01-17 2:34 ` [PATCH v3 21/48] task_mmu: " Liam Howlett
2023-01-17 2:34 ` [PATCH v3 23/48] madvise: Use vmi iterator for __split_vma() and vma_merge() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 24/48] mmap: Pass through vmi iterator to __split_vma() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 22/48] sched: Convert to vma iterator Liam Howlett
2023-01-17 2:34 ` [PATCH v3 26/48] mm/mremap: Use vmi version of vma_merge() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 28/48] nommu: Pass through vma iterator to shrink_vma() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 27/48] nommu: Convert nommu to using the vma iterator Liam Howlett
2023-01-17 2:34 ` [PATCH v3 25/48] mmap: Use vmi version of vma_merge() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 30/48] mm/damon: Stop using vma_mas_store() for maple tree store Liam Howlett
2023-01-17 19:11 ` SeongJae Park
2023-01-17 19:16 ` SeongJae Park
2023-01-17 22:20 ` Daniel Latypov
2023-01-17 22:47 ` Liam Howlett
2023-01-19 2:00 ` SeongJae Park
2023-01-19 18:55 ` Liam R. Howlett
2023-01-17 2:34 ` [PATCH v3 29/48] mm: Switch vma_merge(), split_vma(), and __split_vma to vma iterator Liam Howlett
2023-01-17 2:34 ` [PATCH v3 31/48] mmap: Convert __vma_adjust() to use " Liam Howlett
2023-01-17 2:34 ` [PATCH v3 32/48] mm: Pass through vma iterator to __vma_adjust() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 35/48] mm: Pass vma iterator through " Liam Howlett
2023-01-17 2:34 ` [PATCH v3 33/48] madvise: Use split_vma() instead of __split_vma() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 34/48] mm: Remove unnecessary write to vma iterator in __vma_adjust() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 37/48] mmap: Clean up mmap_region() unrolling Liam Howlett
2023-01-17 2:34 ` [PATCH v3 36/48] mm: Add vma iterator to vma_adjust() arguments Liam Howlett
2023-01-17 2:34 ` [PATCH v3 38/48] mm: Change munmap splitting order and move_vma() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 42/48] mm/mmap: Introduce init_vma_prep() and init_multi_vma_prep() Liam Howlett
2023-01-17 2:34 ` Liam Howlett [this message]
2023-01-17 2:34 ` [PATCH v3 41/48] mm/mmap: Use vma_prepare() and vma_complete() in vma_expand() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 39/48] mm/mmap: move anon_vma setting in __vma_adjust() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 44/48] mm/mmap: Don't use __vma_adjust() in shift_arg_pages() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 45/48] mm/mmap: Introduce dup_vma_anon() helper Liam Howlett
2023-01-17 2:34 ` [PATCH v3 43/48] mm: Don't use __vma_adjust() in __split_vma() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 46/48] mm/mmap: Convert do_brk_flags() to use vma_prepare() and vma_complete() Liam Howlett
2023-01-17 2:34 ` [PATCH v3 48/48] vma_merge: Set vma iterator to correct position Liam Howlett
2023-01-17 2:34 ` [PATCH v3 47/48] mm/mmap: Remove __vma_adjust() Liam Howlett
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230117023335.1690727-41-Liam.Howlett@oracle.com \
--to=liam.howlett@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=maple-tree@lists.infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox