From: Liam Howlett <liam.howlett@oracle.com>
To: "maple-tree@lists.infradead.org" <maple-tree@lists.infradead.org>,
"linux-mm@kvack.org" <linux-mm@kvack.org>,
"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
Andrew Morton <akpm@linux-foundation.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Subject: [PATCH v2 39/44] mm: Don't use __vma_adjust() in __split_vma()
Date: Thu, 5 Jan 2023 19:16:04 +0000
Message-ID: <20230105191517.3099082-40-Liam.Howlett@oracle.com>
In-Reply-To: <20230105191517.3099082-1-Liam.Howlett@oracle.com>
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Use the abstracted locking and maple tree operations in __split_vma().
Since __split_vma() was the only caller of __vma_adjust() that used the
insert argument, drop that argument, and remove the NULL that fs/exec's
shift_arg_pages() passed through for it.
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
fs/exec.c | 4 +-
include/linux/mm.h | 7 ++-
mm/mmap.c | 114 ++++++++++++++++++++-------------------------
3 files changed, 56 insertions(+), 69 deletions(-)
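[Editor's note, not part of the patch: a minimal userspace sketch of the
split arithmetic that the reworked __split_vma() performs, assuming 4K
pages. The names fake_vma, split_fake_vma and PAGE_SHIFT 12 below are
illustrative stand-ins, not the kernel's types; error handling, locking
and the maple tree store are intentionally omitted.]

/* Standalone sketch: the new VMA starts as a copy of the original,
 * one side is trimmed at 'addr', and vm_pgoff of whichever half now
 * begins at 'addr' advances by the number of pages skipped from the
 * old start.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct fake_vma {
	unsigned long vm_start;	/* inclusive, page aligned */
	unsigned long vm_end;	/* exclusive, page aligned */
	unsigned long vm_pgoff;	/* offset into the backing file, in pages */
};

/* Split 'vma' at 'addr'; 'new' becomes the lower half when new_below
 * is set, otherwise the upper half.
 */
static void split_fake_vma(struct fake_vma *vma, struct fake_vma *new,
			   unsigned long addr, int new_below)
{
	*new = *vma;			/* start as a full copy */

	if (new_below) {
		new->vm_end = addr;	/* new covers [vm_start, addr) */
		vma->vm_start = addr;	/* old covers [addr, vm_end) */
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		new->vm_start = addr;	/* new covers [addr, vm_end) */
		new->vm_pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
		vma->vm_end = addr;	/* old covers [vm_start, addr) */
	}
}

int main(void)
{
	struct fake_vma vma = { 0x1000, 0x5000, 10 }, new;

	split_fake_vma(&vma, &new, 0x3000, 0);
	/* the upper half starts two pages into the mapping */
	assert(new.vm_start == 0x3000 && new.vm_pgoff == 12);
	assert(vma.vm_end == 0x3000 && vma.vm_pgoff == 10);
	printf("old [%lx,%lx) pgoff %lu, new [%lx,%lx) pgoff %lu\n",
	       vma.vm_start, vma.vm_end, vma.vm_pgoff,
	       new.vm_start, new.vm_end, new.vm_pgoff);
	return 0;
}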
diff --git a/fs/exec.c b/fs/exec.c
index 76ee62e1d3f1..d52fca2dd30b 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -699,7 +699,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
/*
* cover the whole range: [new_start, old_end)
*/
- if (vma_adjust(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
+ if (vma_adjust(&vmi, vma, new_start, old_end, vma->vm_pgoff))
return -ENOMEM;
/*
@@ -733,7 +733,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
vma_prev(&vmi);
/* Shrink the vma to just the new range */
- return vma_adjust(&vmi, vma, new_start, new_end, vma->vm_pgoff, NULL);
+ return vma_adjust(&vmi, vma, new_start, new_end, vma->vm_pgoff);
}
/*
diff --git a/include/linux/mm.h b/include/linux/mm.h
index aabfd4183091..a00871cc63cc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2823,13 +2823,12 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
- struct vm_area_struct *expand);
+ unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand);
static inline int vma_adjust(struct vma_iterator *vmi,
struct vm_area_struct *vma, unsigned long start, unsigned long end,
- pgoff_t pgoff, struct vm_area_struct *insert)
+ pgoff_t pgoff)
{
- return __vma_adjust(vmi, vma, start, end, pgoff, insert, NULL);
+ return __vma_adjust(vmi, vma, start, end, pgoff, NULL);
}
extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
diff --git a/mm/mmap.c b/mm/mmap.c
index 431c5ee9ce00..3bca62c11686 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -754,7 +754,7 @@ inline int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
*/
int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff,
- struct vm_area_struct *insert, struct vm_area_struct *expand)
+ struct vm_area_struct *expand)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *remove2 = NULL;
@@ -767,7 +767,7 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct vm_area_struct *exporter = NULL, *importer = NULL;
struct vma_prepare vma_prep;
- if (next && !insert) {
+ if (next) {
if (end >= next->vm_end) {
/*
* vma expands, overlapping all the next, and
@@ -858,39 +858,25 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
VM_WARN_ON(vma_prep.anon_vma && adjust_next && next->anon_vma &&
vma_prep.anon_vma != next->anon_vma);
- vma_prep.insert = insert;
vma_prepare(&vma_prep);
- if (start != vma->vm_start) {
- if (vma->vm_start < start) {
- if (!insert || (insert->vm_end != start)) {
- vma_iter_clear(vmi, vma->vm_start, start);
- vma_iter_set(vmi, start);
- VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
- }
- } else {
- vma_changed = true;
- }
- vma->vm_start = start;
- }
- if (end != vma->vm_end) {
- if (vma->vm_end > end) {
- if (!insert || (insert->vm_start != end)) {
- vma_iter_clear(vmi, end, vma->vm_end);
- vma_iter_set(vmi, vma->vm_end);
- VM_WARN_ON(insert &&
- insert->vm_end < vma->vm_end);
- }
- } else {
- vma_changed = true;
- }
- vma->vm_end = end;
- }
+ if (vma->vm_start < start)
+ vma_iter_clear(vmi, vma->vm_start, start);
+ else if (start != vma->vm_start)
+ vma_changed = true;
+
+ if (vma->vm_end > end)
+ vma_iter_clear(vmi, end, vma->vm_end);
+ else if (end != vma->vm_end)
+ vma_changed = true;
+
+ vma->vm_start = start;
+ vma->vm_end = end;
+ vma->vm_pgoff = pgoff;
if (vma_changed)
vma_iter_store(vmi, vma);
- vma->vm_pgoff = pgoff;
if (adjust_next) {
next->vm_start += adjust_next;
next->vm_pgoff += adjust_next >> PAGE_SHIFT;
@@ -909,9 +895,9 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
* per-vma resources, so we don't attempt to merge those.
*/
static inline int is_mergeable_vma(struct vm_area_struct *vma,
- struct file *file, unsigned long vm_flags,
- struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
- struct anon_vma_name *anon_name)
+ struct file *file, unsigned long vm_flags,
+ struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+ struct anon_vma_name *anon_name)
{
/*
* VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -1093,20 +1079,19 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
is_mergeable_anon_vma(prev->anon_vma,
next->anon_vma, NULL)) { /* cases 1, 6 */
err = __vma_adjust(vmi, prev, prev->vm_start,
- next->vm_end, prev->vm_pgoff, NULL,
- prev);
+ next->vm_end, prev->vm_pgoff, prev);
res = prev;
} else if (merge_prev) { /* cases 2, 5, 7 */
err = __vma_adjust(vmi, prev, prev->vm_start,
- end, prev->vm_pgoff, NULL, prev);
+ end, prev->vm_pgoff, prev);
res = prev;
} else if (merge_next) {
if (prev && addr < prev->vm_end) /* case 4 */
err = __vma_adjust(vmi, prev, prev->vm_start,
- addr, prev->vm_pgoff, NULL, next);
+ addr, prev->vm_pgoff, next);
else /* cases 3, 8 */
err = __vma_adjust(vmi, mid, addr, next->vm_end,
- next->vm_pgoff - pglen, NULL, next);
+ next->vm_pgoff - pglen, next);
res = next;
}
@@ -2246,6 +2231,7 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
+ struct vma_prepare vp;
struct vm_area_struct *new;
int err;
@@ -2261,16 +2247,20 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
if (!new)
return -ENOMEM;
- if (new_below)
+ err = -ENOMEM;
+ if (vma_iter_prealloc(vmi, vma))
+ goto out_free_vma;
+
+ if (new_below) {
new->vm_end = addr;
- else {
+ } else {
new->vm_start = addr;
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
err = vma_dup_policy(vma, new);
if (err)
- goto out_free_vma;
+ goto out_free_vmi;
err = anon_vma_clone(new, vma);
if (err)
@@ -2282,33 +2272,31 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
- if (new_below)
- err = vma_adjust(vmi, vma, addr, vma->vm_end,
- vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT),
- new);
- else
- err = vma_adjust(vmi, vma, vma->vm_start, addr, vma->vm_pgoff,
- new);
+ vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
+ init_vma_prep(&vp, vma);
+ vp.insert = new;
+ vma_prepare(&vp);
- /* Success. */
- if (!err) {
- if (new_below)
- vma_next(vmi);
- return 0;
+ if (new_below) {
+ vma->vm_start = addr;
+ vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
+ } else {
+ vma->vm_end = addr;
}
- /* Avoid vm accounting in close() operation */
- new->vm_start = new->vm_end;
- new->vm_pgoff = 0;
- /* Clean everything up if vma_adjust failed. */
- if (new->vm_ops && new->vm_ops->close)
- new->vm_ops->close(new);
- if (new->vm_file)
- fput(new->vm_file);
- unlink_anon_vmas(new);
- out_free_mpol:
+ /* vma_complete stores the new vma */
+ vma_complete(&vp, vmi, vma->vm_mm);
+
+ /* Success. */
+ if (new_below)
+ vma_next(vmi);
+ return 0;
+
+out_free_mpol:
mpol_put(vma_policy(new));
- out_free_vma:
+out_free_vmi:
+ vma_iter_free(vmi);
+out_free_vma:
vm_area_free(new);
validate_mm_mt(vma->vm_mm);
return err;
--
2.35.1
Thread overview: 63+ messages
2023-01-05 19:15 [PATCH v2 00/44] VMA tree type safety and remove __vma_adjust() Liam Howlett
2023-01-05 19:15 ` [PATCH v2 02/44] maple_tree: Fix potential rcu issue Liam Howlett
2023-01-05 19:15 ` [PATCH v2 01/44] maple_tree: Add mas_init() function Liam Howlett
2023-01-05 19:15 ` [PATCH v2 04/44] test_maple_tree: Test modifications while iterating Liam Howlett
2023-01-05 19:15 ` [PATCH v2 05/44] mm: Expand vma iterator interface Liam Howlett
2023-01-05 19:15 ` [PATCH v2 03/44] maple_tree: Reduce user error potential Liam Howlett
2023-01-05 19:15 ` [PATCH v2 08/44] mmap: Convert vma_link() vma iterator Liam Howlett
2023-01-05 19:15 ` [PATCH v2 07/44] kernel/fork: Convert forking to using the vmi iterator Liam Howlett
2023-01-05 19:15 ` [PATCH v2 06/44] mm/mmap: convert brk to use vma iterator Liam Howlett
2023-01-09 15:10 ` Vernon Yang
2023-01-09 16:38 ` Liam Howlett
2023-01-05 19:15 ` [PATCH v2 10/44] mmap: Change do_mas_munmap and do_mas_aligned_munmap() " Liam Howlett
2023-01-10 14:53 ` Sven Schnelle
2023-01-10 17:26 ` Liam Howlett
2023-01-11 6:55 ` Sven Schnelle
2023-01-05 19:15 ` [PATCH v2 11/44] mmap: Convert vma_expand() " Liam Howlett
2023-01-05 19:15 ` [PATCH v2 09/44] mm/mmap: Remove preallocation from do_mas_align_munmap() Liam Howlett
2023-01-05 19:15 ` [PATCH v2 14/44] userfaultfd: Use vma iterator Liam Howlett
2023-01-05 19:15 ` [PATCH v2 13/44] ipc/shm: Use the vma iterator for munmap calls Liam Howlett
2023-01-05 19:15 ` [PATCH v2 15/44] mm: Change mprotect_fixup to vma iterator Liam Howlett
2023-01-05 19:15 ` [PATCH v2 12/44] mm: Add temporary vma iterator versions of vma_merge(), split_vma(), and __split_vma() Liam Howlett
2023-01-05 19:15 ` [PATCH v2 18/44] mempolicy: Convert to vma iterator Liam Howlett
2023-01-05 19:15 ` [PATCH v2 17/44] coredump: " Liam Howlett
2023-01-05 19:15 ` [PATCH v2 16/44] mlock: Convert mlock " Liam Howlett
2023-01-05 19:15 ` [PATCH v2 20/44] sched: Convert " Liam Howlett
2023-01-05 19:15 ` [PATCH v2 22/44] mmap: Pass through vmi iterator to __split_vma() Liam Howlett
2023-01-07 2:01 ` SeongJae Park
2023-01-07 2:39 ` SeongJae Park
2023-01-09 16:45 ` Liam Howlett
2023-01-09 19:28 ` SeongJae Park
2023-01-09 20:30 ` Liam Howlett
2023-01-09 23:07 ` SeongJae Park
2023-01-05 19:15 ` [PATCH v2 21/44] madvise: Use vmi iterator for __split_vma() and vma_merge() Liam Howlett
2023-01-05 19:15 ` [PATCH v2 19/44] task_mmu: Convert to vma iterator Liam Howlett
2023-01-05 19:15 ` [PATCH v2 23/44] mmap: Use vmi version of vma_merge() Liam Howlett
2023-01-05 19:15 ` [PATCH v2 25/44] mm: Switch vma_merge(), split_vma(), and __split_vma to vma iterator Liam Howlett
2023-01-06 17:23 ` SeongJae Park
2023-01-06 19:20 ` Liam Howlett
2023-01-05 19:15 ` [PATCH v2 24/44] mm/mremap: Use vmi version of vma_merge() Liam Howlett
2023-01-05 19:16 ` [PATCH v2 26/44] mm/damon: Stop using vma_mas_store() for maple tree store Liam Howlett
2023-01-05 19:32 ` SeongJae Park
2023-01-05 19:52 ` Liam Howlett
2023-01-05 20:16 ` SeongJae Park
2023-01-05 19:16 ` [PATCH v2 27/44] mmap: Convert __vma_adjust() to use vma iterator Liam Howlett
2023-01-05 19:16 ` [PATCH v2 28/44] mm: Pass through vma iterator to __vma_adjust() Liam Howlett
2023-01-05 19:16 ` [PATCH v2 30/44] mm: Remove unnecessary write to vma iterator in __vma_adjust() Liam Howlett
2023-01-05 19:16 ` [PATCH v2 29/44] madvise: Use split_vma() instead of __split_vma() Liam Howlett
2023-01-05 19:16 ` [PATCH v2 32/44] mm: Add vma iterator to vma_adjust() arguments Liam Howlett
2023-01-05 19:16 ` [PATCH v2 33/44] mmap: Clean up mmap_region() unrolling Liam Howlett
2023-01-05 19:16 ` [PATCH v2 31/44] mm: Pass vma iterator through to __vma_adjust() Liam Howlett
2023-01-05 19:16 ` [PATCH v2 34/44] mm: Change munmap splitting order and move_vma() Liam Howlett
2023-01-05 19:16 ` [PATCH v2 37/44] mm/mmap: Use vma_prepare() and vma_complete() in vma_expand() Liam Howlett
2023-01-05 19:16 ` [PATCH v2 35/44] mm/mmap: move anon_vma setting in __vma_adjust() Liam Howlett
2023-01-05 19:16 ` [PATCH v2 36/44] mm/mmap: Refactor locking out of __vma_adjust() Liam Howlett
2023-01-05 19:16 ` [PATCH v2 38/44] mm/mmap: Introduce init_vma_prep() and init_multi_vma_prep() Liam Howlett
2023-01-05 19:16 ` [PATCH v2 40/44] mm/mmap: Don't use __vma_adjust() in shift_arg_pages() Liam Howlett
2023-01-05 19:16 ` Liam Howlett [this message]
2023-01-05 19:16 ` [PATCH v2 44/44] vma_merge: Set vma iterator to correct position Liam Howlett
2023-01-05 19:16 ` [PATCH v2 42/44] mm/mmap: Convert do_brk_flags() to use vma_prepare() and vma_complete() Liam Howlett
2023-01-05 19:16 ` [PATCH v2 43/44] mm/mmap: Remove __vma_adjust() Liam Howlett
2023-01-05 19:16 ` [PATCH v2 41/44] mm/mmap: Introduce dup_vma_anon() helper Liam Howlett
2023-01-10 22:51 ` [PATCH v2 00/44] VMA tree type safety and remove __vma_adjust() Mark Brown
2023-01-11 2:22 ` Liam Howlett