From: Suren Baghdasaryan <surenb@google.com>
To: akpm@linux-foundation.org
Cc: torvalds@linux-foundation.org, jannh@google.com,
willy@infradead.org, liam.howlett@oracle.com, david@redhat.com,
peterx@redhat.com, ldufour@linux.ibm.com, vbabka@suse.cz,
michel@lespinasse.org, jglisse@google.com, mhocko@suse.com,
hannes@cmpxchg.org, dave@stgolabs.net, hughd@google.com,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
stable@vger.kernel.org, Suren Baghdasaryan <surenb@google.com>,
Linus Torvalds <torvalds@linuxfoundation.org>
Subject: [PATCH 6/6] mm: move vma locking out of vma_prepare
Date: Mon, 31 Jul 2023 10:12:32 -0700
Message-ID: <20230731171233.1098105-7-surenb@google.com>
In-Reply-To: <20230731171233.1098105-1-surenb@google.com>

vma_prepare() is currently the central place where vmas are locked
before vma_complete() applies changes to them. While this is
convenient, it also obscures the locking and makes the locking rules
hard to follow. Move vma locking out of vma_prepare() and take the
vma locks explicitly at the call sites where vmas are modified.
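
For illustration, the resulting pattern at a typical modification site
looks roughly like the sketch below; it is stitched together from the
callers changed by this patch rather than copied verbatim from any one
of them:

	vma_start_write(vma);	/* lock explicitly, before any change */

	init_vma_prep(&vp, vma);
	vma_prepare(&vp);	/* no longer takes any vma locks */
	/* ... modify the vma ... */
	vma_complete(&vp, vmi, mm);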
Suggested-by: Linus Torvalds <torvalds@linuxfoundation.org>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
mm/mmap.c | 26 ++++++++++++++++----------
1 file changed, 16 insertions(+), 10 deletions(-)
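
Note for reviewers: a minimal sketch of the locking discipline this
series converges on, assuming the vma_assert_write_locked() helper
used earlier in the series; update_vma_fields() below is a
hypothetical function, shown only for illustration:

	/* A helper that writes to a vma only asserts the lock ... */
	static void update_vma_fields(struct vm_area_struct *vma)
	{
		vma_assert_write_locked(vma);
		/* ... vma fields may be modified safely here ... */
	}

	/* ... while each of its callers takes the lock explicitly: */
	vma_start_write(vma);
	update_vma_fields(vma);
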
diff --git a/mm/mmap.c b/mm/mmap.c
index 850a39dee075..e59d83cb1d7a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -476,16 +476,6 @@ static inline void init_vma_prep(struct vma_prepare *vp,
  */
 static inline void vma_prepare(struct vma_prepare *vp)
 {
-	vma_start_write(vp->vma);
-	if (vp->adj_next)
-		vma_start_write(vp->adj_next);
-	if (vp->insert)
-		vma_start_write(vp->insert);
-	if (vp->remove)
-		vma_start_write(vp->remove);
-	if (vp->remove2)
-		vma_start_write(vp->remove2);
-
 	if (vp->file) {
 		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
 
@@ -650,6 +640,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	bool remove_next = false;
 	struct vma_prepare vp;
 
+	vma_start_write(vma);
 	if (next && (vma != next) && (end == next->vm_end)) {
 		int ret;
 
@@ -657,6 +648,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		ret = dup_anon_vma(vma, next);
 		if (ret)
 			return ret;
+		vma_start_write(next);
 	}
 
 	init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
@@ -708,6 +700,8 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	if (vma_iter_prealloc(vmi))
 		return -ENOMEM;
 
+	vma_start_write(vma);
+
 	init_vma_prep(&vp, vma);
 	vma_prepare(&vp);
 	vma_adjust_trans_huge(vma, start, end, 0);
@@ -946,10 +940,12 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 	/* Can we merge both the predecessor and the successor? */
 	if (merge_prev && merge_next &&
 			is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
+		vma_start_write(next);
 		remove = next;				/* case 1 */
 		vma_end = next->vm_end;
 		err = dup_anon_vma(prev, next);
 		if (curr) {				/* case 6 */
+			vma_start_write(curr);
 			remove = curr;
 			remove2 = next;
 			if (!next->anon_vma)
@@ -958,6 +954,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 	} else if (merge_prev) {			/* case 2 */
 		if (curr) {
 			err = dup_anon_vma(prev, curr);
+			vma_start_write(curr);
 			if (end == curr->vm_end) {	/* case 7 */
 				remove = curr;
 			} else {			/* case 5 */
@@ -969,6 +966,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 		res = next;
 		if (prev && addr < prev->vm_end) {	/* case 4 */
 			vma_end = addr;
+			vma_start_write(next);
 			adjust = next;
 			adj_start = -(prev->vm_end - addr);
 			err = dup_anon_vma(next, prev);
@@ -983,6 +981,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 			vma_pgoff = next->vm_pgoff - pglen;
 			if (curr) {			/* case 8 */
 				vma_pgoff = curr->vm_pgoff;
+				vma_start_write(curr);
 				remove = curr;
 				err = dup_anon_vma(next, curr);
 			}
@@ -996,6 +995,8 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 	if (vma_iter_prealloc(vmi))
 		return NULL;
 
+	vma_start_write(vma);
+
 	init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
 	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
 		   vp.anon_vma != adjust->anon_vma);
@@ -2373,6 +2374,9 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	if (new->vm_ops && new->vm_ops->open)
 		new->vm_ops->open(new);
 
+	vma_start_write(vma);
+	vma_start_write(new);
+
 	init_vma_prep(&vp, vma);
 	vp.insert = new;
 	vma_prepare(&vp);
@@ -3078,6 +3082,8 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	if (vma_iter_prealloc(vmi))
 		goto unacct_fail;
 
+	vma_start_write(vma);
+
 	init_vma_prep(&vp, vma);
 	vma_prepare(&vp);
 	vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
--
2.41.0.487.g6d72f3e995-goog