From: Suren Baghdasaryan <surenb@google.com>
To: akpm@linux-foundation.org
Cc: peterz@infradead.org, willy@infradead.org,
liam.howlett@oracle.com, lorenzo.stoakes@oracle.com,
david.laight.linux@gmail.com, mhocko@suse.com, vbabka@suse.cz,
hannes@cmpxchg.org, mjguzik@gmail.com, oliver.sang@intel.com,
mgorman@techsingularity.net, david@redhat.com,
peterx@redhat.com, oleg@redhat.com, dave@stgolabs.net,
paulmck@kernel.org, brauner@kernel.org, dhowells@redhat.com,
hdanton@sina.com, hughd@google.com, lokeshgidra@google.com,
minchan@google.com, jannh@google.com, shakeel.butt@linux.dev,
souravpanda@google.com, pasha.tatashin@soleen.com,
klarasmodin@gmail.com, richard.weiyang@gmail.com,
corbet@lwn.net, linux-doc@vger.kernel.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, kernel-team@android.com,
surenb@google.com, "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Subject: [PATCH v9 03/17] mm: mark vma as detached until it's added into vma tree
Date: Fri, 10 Jan 2025 20:25:50 -0800 [thread overview]
Message-ID: <20250111042604.3230628-4-surenb@google.com> (raw)
In-Reply-To: <20250111042604.3230628-1-surenb@google.com>
The current implementation does not set the detached flag when a VMA is first
allocated. This does not represent the real state of the VMA, which is
detached until it is added into mm's VMA tree. Fix this by marking new
VMAs as detached and resetting the detached flag only after the VMA is added
into a tree.
Introduce vma_mark_attached() to make the API more readable and to
simplify a possible future cleanup when vma->vm_mm might be used to indicate
a detached vma, at which point vma_mark_attached() will need an additional mm
parameter.
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
include/linux/mm.h | 27 ++++++++++++++++++++-------
kernel/fork.c | 4 ++++
mm/memory.c | 2 +-
mm/vma.c | 6 +++---
mm/vma.h | 2 ++
tools/testing/vma/vma_internal.h | 17 ++++++++++++-----
6 files changed, 42 insertions(+), 16 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ed739406b0a7..2b322871da87 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -821,12 +821,21 @@ static inline void vma_assert_locked(struct vm_area_struct *vma)
vma_assert_write_locked(vma);
}
-static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+static inline void vma_mark_attached(struct vm_area_struct *vma)
+{
+ vma->detached = false;
+}
+
+static inline void vma_mark_detached(struct vm_area_struct *vma)
{
/* When detaching vma should be write-locked */
- if (detached)
- vma_assert_write_locked(vma);
- vma->detached = detached;
+ vma_assert_write_locked(vma);
+ vma->detached = true;
+}
+
+static inline bool is_vma_detached(struct vm_area_struct *vma)
+{
+ return vma->detached;
}
static inline void release_fault_lock(struct vm_fault *vmf)
@@ -857,8 +866,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) {}
static inline void vma_start_write(struct vm_area_struct *vma) {}
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{ mmap_assert_write_locked(vma->vm_mm); }
-static inline void vma_mark_detached(struct vm_area_struct *vma,
- bool detached) {}
+static inline void vma_mark_attached(struct vm_area_struct *vma) {}
+static inline void vma_mark_detached(struct vm_area_struct *vma) {}
static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address)
@@ -891,7 +900,10 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
vma->vm_mm = mm;
vma->vm_ops = &vma_dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma_mark_detached(vma, false);
+#ifdef CONFIG_PER_VMA_LOCK
+ /* vma is not locked, can't use vma_mark_detached() */
+ vma->detached = true;
+#endif
vma_numab_state_init(vma);
vma_lock_init(vma);
}
@@ -1086,6 +1098,7 @@ static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
+ vma_mark_attached(vma);
return 0;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 40a8e615499f..f2f9e7b427ad 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -465,6 +465,10 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
data_race(memcpy(new, orig, sizeof(*new)));
vma_lock_init(new);
INIT_LIST_HEAD(&new->anon_vma_chain);
+#ifdef CONFIG_PER_VMA_LOCK
+ /* vma is not locked, can't use vma_mark_detached() */
+ new->detached = true;
+#endif
vma_numab_state_init(new);
dup_anon_vma_name(orig, new);
diff --git a/mm/memory.c b/mm/memory.c
index 2a20e3810534..d0dee2282325 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6349,7 +6349,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
goto inval;
/* Check if the VMA got isolated after we found it */
- if (vma->detached) {
+ if (is_vma_detached(vma)) {
vma_end_read(vma);
count_vm_vma_lock_event(VMA_LOCK_MISS);
/* The area was replaced with another one */
diff --git a/mm/vma.c b/mm/vma.c
index af1d549b179c..d603494e69d7 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -327,7 +327,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
if (vp->remove) {
again:
- vma_mark_detached(vp->remove, true);
+ vma_mark_detached(vp->remove);
if (vp->file) {
uprobe_munmap(vp->remove, vp->remove->vm_start,
vp->remove->vm_end);
@@ -1221,7 +1221,7 @@ static void reattach_vmas(struct ma_state *mas_detach)
mas_set(mas_detach, 0);
mas_for_each(mas_detach, vma, ULONG_MAX)
- vma_mark_detached(vma, false);
+ vma_mark_attached(vma);
__mt_destroy(mas_detach->tree);
}
@@ -1296,7 +1296,7 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
if (error)
goto munmap_gather_failed;
- vma_mark_detached(next, true);
+ vma_mark_detached(next);
nrpages = vma_pages(next);
vms->nr_pages += nrpages;
diff --git a/mm/vma.h b/mm/vma.h
index a2e8710b8c47..2a2668de8d2c 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -157,6 +157,7 @@ static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
+ vma_mark_attached(vma);
return 0;
}
@@ -389,6 +390,7 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
mas_store_prealloc(&vmi->mas, vma);
+ vma_mark_attached(vma);
}
static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index 96aeb28c81f9..47c8b03ffbbd 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -469,13 +469,17 @@ static inline void vma_lock_init(struct vm_area_struct *vma)
vma->vm_lock_seq = UINT_MAX;
}
+static inline void vma_mark_attached(struct vm_area_struct *vma)
+{
+ vma->detached = false;
+}
+
static inline void vma_assert_write_locked(struct vm_area_struct *);
-static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+static inline void vma_mark_detached(struct vm_area_struct *vma)
{
/* When detaching vma should be write-locked */
- if (detached)
- vma_assert_write_locked(vma);
- vma->detached = detached;
+ vma_assert_write_locked(vma);
+ vma->detached = true;
}
extern const struct vm_operations_struct vma_dummy_vm_ops;
@@ -488,7 +492,8 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
vma->vm_mm = mm;
vma->vm_ops = &vma_dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma_mark_detached(vma, false);
+ /* vma is not locked, can't use vma_mark_detached() */
+ vma->detached = true;
vma_lock_init(vma);
}
@@ -514,6 +519,8 @@ static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
memcpy(new, orig, sizeof(*new));
vma_lock_init(new);
INIT_LIST_HEAD(&new->anon_vma_chain);
+ /* vma is not locked, can't use vma_mark_detached() */
+ new->detached = true;
return new;
}
--
2.47.1.613.gc27f4b7a9f-goog
next prev parent reply other threads:[~2025-01-11 4:26 UTC|newest]
Thread overview: 140+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-01-11 4:25 [PATCH v9 00/17] reimplement per-vma lock as a refcount Suren Baghdasaryan
2025-01-11 4:25 ` [PATCH v9 01/17] mm: introduce vma_start_read_locked{_nested} helpers Suren Baghdasaryan
2025-01-11 4:25 ` [PATCH v9 02/17] mm: move per-vma lock into vm_area_struct Suren Baghdasaryan
2025-01-11 4:25 ` Suren Baghdasaryan [this message]
2025-01-11 4:25 ` [PATCH v9 04/17] mm: introduce vma_iter_store_attached() to use with attached vmas Suren Baghdasaryan
2025-01-13 11:58 ` Lorenzo Stoakes
2025-01-13 16:31 ` Suren Baghdasaryan
2025-01-13 16:44 ` Lorenzo Stoakes
2025-01-13 16:47 ` Lorenzo Stoakes
2025-01-13 19:09 ` Suren Baghdasaryan
2025-01-14 11:38 ` Lorenzo Stoakes
2025-01-11 4:25 ` [PATCH v9 05/17] mm: mark vmas detached upon exit Suren Baghdasaryan
2025-01-13 12:05 ` Lorenzo Stoakes
2025-01-13 17:02 ` Suren Baghdasaryan
2025-01-13 17:13 ` Lorenzo Stoakes
2025-01-13 19:11 ` Suren Baghdasaryan
2025-01-13 20:32 ` Vlastimil Babka
2025-01-13 20:42 ` Suren Baghdasaryan
2025-01-14 11:36 ` Lorenzo Stoakes
2025-01-11 4:25 ` [PATCH v9 06/17] types: move struct rcuwait into types.h Suren Baghdasaryan
2025-01-13 14:46 ` Lorenzo Stoakes
2025-01-11 4:25 ` [PATCH v9 07/17] mm: allow vma_start_read_locked/vma_start_read_locked_nested to fail Suren Baghdasaryan
2025-01-13 15:25 ` Lorenzo Stoakes
2025-01-13 17:53 ` Suren Baghdasaryan
2025-01-14 11:48 ` Lorenzo Stoakes
2025-01-11 4:25 ` [PATCH v9 08/17] mm: move mmap_init_lock() out of the header file Suren Baghdasaryan
2025-01-13 15:27 ` Lorenzo Stoakes
2025-01-13 17:53 ` Suren Baghdasaryan
2025-01-11 4:25 ` [PATCH v9 09/17] mm: uninline the main body of vma_start_write() Suren Baghdasaryan
2025-01-13 15:52 ` Lorenzo Stoakes
2025-01-11 4:25 ` [PATCH v9 10/17] refcount: introduce __refcount_{add|inc}_not_zero_limited Suren Baghdasaryan
2025-01-11 6:31 ` Hillf Danton
2025-01-11 9:59 ` Suren Baghdasaryan
2025-01-11 10:00 ` Suren Baghdasaryan
2025-01-11 12:13 ` Hillf Danton
2025-01-11 17:11 ` Suren Baghdasaryan
2025-01-11 23:44 ` Hillf Danton
2025-01-12 0:31 ` Suren Baghdasaryan
2025-01-15 9:39 ` Peter Zijlstra
2025-01-16 10:52 ` Hillf Danton
2025-01-11 12:39 ` David Laight
2025-01-11 17:07 ` Matthew Wilcox
2025-01-11 18:30 ` Paul E. McKenney
2025-01-11 22:19 ` David Laight
2025-01-11 22:50 ` [PATCH v9 10/17] refcount: introduce __refcount_{add|inc}_not_zero_limited - clang 17.0.1 bug David Laight
2025-01-12 11:37 ` David Laight
2025-01-12 17:56 ` Paul E. McKenney
2025-01-11 4:25 ` [PATCH v9 11/17] mm: replace vm_lock and detached flag with a reference count Suren Baghdasaryan
2025-01-11 11:24 ` Mateusz Guzik
2025-01-11 20:14 ` Suren Baghdasaryan
2025-01-11 20:16 ` Suren Baghdasaryan
2025-01-11 20:31 ` Mateusz Guzik
2025-01-11 20:58 ` Suren Baghdasaryan
2025-01-11 20:38 ` Vlastimil Babka
2025-01-13 1:47 ` Wei Yang
2025-01-13 2:25 ` Wei Yang
2025-01-13 21:14 ` Suren Baghdasaryan
2025-01-13 21:08 ` Suren Baghdasaryan
2025-01-15 10:48 ` Peter Zijlstra
2025-01-15 11:13 ` Peter Zijlstra
2025-01-15 15:00 ` Suren Baghdasaryan
2025-01-15 15:35 ` Peter Zijlstra
2025-01-15 15:38 ` Peter Zijlstra
2025-01-15 16:22 ` Suren Baghdasaryan
2025-01-15 16:00 ` [PATCH] refcount: Strengthen inc_not_zero() Peter Zijlstra
2025-01-16 15:12 ` Suren Baghdasaryan
2025-01-17 15:41 ` Will Deacon
2025-01-27 14:09 ` Will Deacon
2025-01-27 19:21 ` Suren Baghdasaryan
2025-01-28 23:51 ` Suren Baghdasaryan
2025-02-06 2:52 ` [PATCH 1/1] refcount: provide ops for cases when object's memory can be reused Suren Baghdasaryan
2025-02-06 10:41 ` Vlastimil Babka
2025-02-06 3:03 ` [PATCH] refcount: Strengthen inc_not_zero() Suren Baghdasaryan
2025-02-13 23:04 ` Suren Baghdasaryan
2025-01-17 16:13 ` Matthew Wilcox
2025-01-12 2:59 ` [PATCH v9 11/17] mm: replace vm_lock and detached flag with a reference count Wei Yang
2025-01-12 17:35 ` Suren Baghdasaryan
2025-01-13 0:59 ` Wei Yang
2025-01-13 2:37 ` Wei Yang
2025-01-13 21:16 ` Suren Baghdasaryan
2025-01-13 9:36 ` Wei Yang
2025-01-13 21:18 ` Suren Baghdasaryan
2025-01-15 2:58 ` Wei Yang
2025-01-15 3:12 ` Suren Baghdasaryan
2025-01-15 12:05 ` Wei Yang
2025-01-15 15:01 ` Suren Baghdasaryan
2025-01-16 1:37 ` Wei Yang
2025-01-16 1:41 ` Suren Baghdasaryan
2025-01-16 9:10 ` Wei Yang
2025-01-11 4:25 ` [PATCH v9 12/17] mm: move lesser used vma_area_struct members into the last cacheline Suren Baghdasaryan
2025-01-13 16:15 ` Lorenzo Stoakes
2025-01-15 10:50 ` Peter Zijlstra
2025-01-15 16:39 ` Suren Baghdasaryan
2025-02-13 22:59 ` Suren Baghdasaryan
2025-01-11 4:26 ` [PATCH v9 13/17] mm/debug: print vm_refcnt state when dumping the vma Suren Baghdasaryan
2025-01-13 16:21 ` Lorenzo Stoakes
2025-01-13 16:35 ` Liam R. Howlett
2025-01-13 17:57 ` Suren Baghdasaryan
2025-01-14 11:41 ` Lorenzo Stoakes
2025-01-11 4:26 ` [PATCH v9 14/17] mm: remove extra vma_numab_state_init() call Suren Baghdasaryan
2025-01-13 16:28 ` Lorenzo Stoakes
2025-01-13 17:56 ` Suren Baghdasaryan
2025-01-14 11:45 ` Lorenzo Stoakes
2025-01-11 4:26 ` [PATCH v9 15/17] mm: prepare lock_vma_under_rcu() for vma reuse possibility Suren Baghdasaryan
2025-01-11 4:26 ` [PATCH v9 16/17] mm: make vma cache SLAB_TYPESAFE_BY_RCU Suren Baghdasaryan
2025-01-15 2:27 ` Wei Yang
2025-01-15 3:15 ` Suren Baghdasaryan
2025-01-15 3:58 ` Liam R. Howlett
2025-01-15 5:41 ` Suren Baghdasaryan
2025-01-15 3:59 ` Mateusz Guzik
2025-01-15 5:47 ` Suren Baghdasaryan
2025-01-15 5:51 ` Mateusz Guzik
2025-01-15 6:41 ` Suren Baghdasaryan
2025-01-15 7:58 ` Vlastimil Babka
2025-01-15 15:10 ` Suren Baghdasaryan
2025-02-13 22:56 ` Suren Baghdasaryan
2025-01-15 12:17 ` Wei Yang
2025-01-15 21:46 ` Suren Baghdasaryan
2025-01-11 4:26 ` [PATCH v9 17/17] docs/mm: document latest changes to vm_lock Suren Baghdasaryan
2025-01-13 16:33 ` Lorenzo Stoakes
2025-01-13 17:56 ` Suren Baghdasaryan
2025-01-11 4:52 ` [PATCH v9 00/17] reimplement per-vma lock as a refcount Matthew Wilcox
2025-01-11 9:45 ` Suren Baghdasaryan
2025-01-13 12:14 ` Lorenzo Stoakes
2025-01-13 16:58 ` Suren Baghdasaryan
2025-01-13 17:11 ` Lorenzo Stoakes
2025-01-13 19:00 ` Suren Baghdasaryan
2025-01-14 11:35 ` Lorenzo Stoakes
2025-01-14 1:49 ` Andrew Morton
2025-01-14 2:53 ` Suren Baghdasaryan
2025-01-14 4:09 ` Andrew Morton
2025-01-14 9:09 ` Vlastimil Babka
2025-01-14 10:27 ` Hillf Danton
2025-01-14 9:47 ` Lorenzo Stoakes
2025-01-14 14:59 ` Liam R. Howlett
2025-01-14 15:54 ` Suren Baghdasaryan
2025-01-15 11:34 ` Lorenzo Stoakes
2025-01-15 15:14 ` Suren Baghdasaryan
2025-01-28 5:26 ` Shivank Garg
2025-01-28 5:50 ` Suren Baghdasaryan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250111042604.3230628-4-surenb@google.com \
--to=surenb@google.com \
--cc=akpm@linux-foundation.org \
--cc=brauner@kernel.org \
--cc=corbet@lwn.net \
--cc=dave@stgolabs.net \
--cc=david.laight.linux@gmail.com \
--cc=david@redhat.com \
--cc=dhowells@redhat.com \
--cc=hannes@cmpxchg.org \
--cc=hdanton@sina.com \
--cc=hughd@google.com \
--cc=jannh@google.com \
--cc=kernel-team@android.com \
--cc=klarasmodin@gmail.com \
--cc=liam.howlett@oracle.com \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=lokeshgidra@google.com \
--cc=lorenzo.stoakes@oracle.com \
--cc=mgorman@techsingularity.net \
--cc=mhocko@suse.com \
--cc=minchan@google.com \
--cc=mjguzik@gmail.com \
--cc=oleg@redhat.com \
--cc=oliver.sang@intel.com \
--cc=pasha.tatashin@soleen.com \
--cc=paulmck@kernel.org \
--cc=peterx@redhat.com \
--cc=peterz@infradead.org \
--cc=richard.weiyang@gmail.com \
--cc=shakeel.butt@linux.dev \
--cc=souravpanda@google.com \
--cc=vbabka@suse.cz \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox