From: Usama Arif <usama.arif@linux.dev>
To: Andrew Morton <akpm@linux-foundation.org>,
david@kernel.org, lorenzo.stoakes@oracle.com,
willy@infradead.org, linux-mm@kvack.org
Cc: fvdl@google.com, hannes@cmpxchg.org, riel@surriel.com,
shakeel.butt@linux.dev, kas@kernel.org, baohua@kernel.org,
dev.jain@arm.com, baolin.wang@linux.alibaba.com,
npache@redhat.com, Liam.Howlett@oracle.com, ryan.roberts@arm.com,
Vlastimil Babka <vbabka@kernel.org>,
lance.yang@linux.dev, linux-kernel@vger.kernel.org,
kernel-team@meta.com, maddy@linux.ibm.com, mpe@ellerman.id.au,
linuxppc-dev@lists.ozlabs.org, hca@linux.ibm.com,
gor@linux.ibm.com, agordeev@linux.ibm.com,
borntraeger@linux.ibm.com, svens@linux.ibm.com,
linux-s390@vger.kernel.org, Usama Arif <usama.arif@linux.dev>
Subject: [RFC v2 02/21] mm: thp: propagate split failure from vma_adjust_trans_huge()
Date: Thu, 26 Feb 2026 03:23:31 -0800
Message-ID: <20260226113233.3987674-3-usama.arif@linux.dev>
In-Reply-To: <20260226113233.3987674-1-usama.arif@linux.dev>
With lazy PTE page table allocation, split_huge_pmd_if_needed() and
thus vma_adjust_trans_huge() can now fail if the order-0 allocation
for the page table fails when trying to split. Callers must check for
this failure to avoid leaving a huge PMD straddling a VMA boundary.
The vma_adjust_trans_huge() call is moved before vma_prepare() in all
three callers (__split_vma, vma_shrink, commit_merge). Previously it sat
between vma_prepare() and vma_complete(), where there is no mechanism to
abort - once vma_prepare() has been called, we must reach vma_complete().
By moving the call earlier, a split failure can return -ENOMEM cleanly
without needing to undo VMA preparation.
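
For reference, the resulting ordering in the callers looks as follows
(abbreviated sketch of the __split_vma() change in the diff below):

	Before:
		vma_prepare(&vp);
		vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL); /* no way to bail out */
		...
		vma_complete(&vp, vmi, vma->vm_mm);

	After:
		err = vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL);
		if (err)
			return err;	/* nothing to undo yet */
		...
		vma_prepare(&vp);
		...
		vma_complete(&vp, vmi, vma->vm_mm);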
This move is safe because vma_adjust_trans_huge() acquires its own
pmd_lock() internally and does not depend on any locks or state changes
from vma_prepare(). The VMA boundaries are also unchanged at the new
call site, satisfying __split_huge_pmd_locked()'s requirement that the
VMA covers the full PMD range.
All three callers (__split_vma, vma_shrink, commit_merge) already
return -ENOMEM on other allocation failures (for example, a failure in
vma_iter_prealloc()); propagating -ENOMEM here follows the same pattern.
Signed-off-by: Usama Arif <usama.arif@linux.dev>
---
include/linux/huge_mm.h | 13 ++++++-----
mm/huge_memory.c | 21 +++++++++++++-----
mm/vma.c | 37 +++++++++++++++++++++----------
tools/testing/vma/include/stubs.h | 9 ++++----
4 files changed, 53 insertions(+), 27 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e4cbf5afdbe7e..207bf7cd95c78 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -484,8 +484,8 @@ int hugepage_madvise(struct vm_area_struct *vma, vm_flags_t *vm_flags,
int advice);
int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
unsigned long end, bool *lock_dropped);
-void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct vm_area_struct *next);
+int vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, struct vm_area_struct *next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
@@ -687,11 +687,12 @@ static inline int madvise_collapse(struct vm_area_struct *vma,
return -EINVAL;
}
-static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- struct vm_area_struct *next)
+static inline int vma_adjust_trans_huge(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ struct vm_area_struct *next)
{
+ return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 125ff36f475de..a979aa5bd2995 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3316,20 +3316,31 @@ static inline int split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned
return 0;
}
-void vma_adjust_trans_huge(struct vm_area_struct *vma,
+int vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
struct vm_area_struct *next)
{
+ int err;
+
/* Check if we need to split start first. */
- split_huge_pmd_if_needed(vma, start);
+ err = split_huge_pmd_if_needed(vma, start);
+ if (err)
+ return err;
/* Check if we need to split end next. */
- split_huge_pmd_if_needed(vma, end);
+ err = split_huge_pmd_if_needed(vma, end);
+ if (err)
+ return err;
/* If we're incrementing next->vm_start, we might need to split it. */
- if (next)
- split_huge_pmd_if_needed(next, end);
+ if (next) {
+ err = split_huge_pmd_if_needed(next, end);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static void unmap_folio(struct folio *folio)
diff --git a/mm/vma.c b/mm/vma.c
index be64f781a3aa7..f50b1f291ab7c 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -510,6 +510,15 @@ __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
return err;
}
+ /*
+ * Split any THP straddling the split boundary before splitting
+ * the VMA itself. Do this before vma_prepare() so we can
+ * cleanly fail without undoing VMA preparation.
+ */
+ err = vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL);
+ if (err)
+ return err;
+
new = vm_area_dup(vma);
if (!new)
return -ENOMEM;
@@ -547,11 +556,6 @@ __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
vp.insert = new;
vma_prepare(&vp);
- /*
- * Get rid of huge pages and shared page tables straddling the split
- * boundary.
- */
- vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL);
if (is_vm_hugetlb_page(vma))
hugetlb_split(vma, addr);
@@ -729,6 +733,7 @@ static int commit_merge(struct vma_merge_struct *vmg)
{
struct vm_area_struct *vma;
struct vma_prepare vp;
+ int err;
if (vmg->__adjust_next_start) {
/* We manipulate middle and adjust next, which is the target. */
@@ -740,6 +745,16 @@ static int commit_merge(struct vma_merge_struct *vmg)
vma_iter_config(vmg->vmi, vmg->start, vmg->end);
}
+ /*
+ * THP pages may need to do additional splits if we increase
+ * middle->vm_start. Do this before vma_prepare() so we can
+ * cleanly fail without undoing VMA preparation.
+ */
+ err = vma_adjust_trans_huge(vma, vmg->start, vmg->end,
+ vmg->__adjust_middle_start ? vmg->middle : NULL);
+ if (err)
+ return err;
+
init_multi_vma_prep(&vp, vma, vmg);
/*
@@ -752,12 +767,6 @@ static int commit_merge(struct vma_merge_struct *vmg)
return -ENOMEM;
vma_prepare(&vp);
- /*
- * THP pages may need to do additional splits if we increase
- * middle->vm_start.
- */
- vma_adjust_trans_huge(vma, vmg->start, vmg->end,
- vmg->__adjust_middle_start ? vmg->middle : NULL);
vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff);
vmg_adjust_set_range(vmg);
vma_iter_store_overwrite(vmg->vmi, vmg->target);
@@ -1229,9 +1238,14 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff)
{
struct vma_prepare vp;
+ int err;
WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
+ err = vma_adjust_trans_huge(vma, start, end, NULL);
+ if (err)
+ return err;
+
if (vma->vm_start < start)
vma_iter_config(vmi, vma->vm_start, start);
else
@@ -1244,7 +1258,6 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
init_vma_prep(&vp, vma);
vma_prepare(&vp);
- vma_adjust_trans_huge(vma, start, end, NULL);
vma_iter_clear(vmi);
vma_set_range(vma, start, end, pgoff);
diff --git a/tools/testing/vma/include/stubs.h b/tools/testing/vma/include/stubs.h
index 947a3a0c25665..171986f9c9fcd 100644
--- a/tools/testing/vma/include/stubs.h
+++ b/tools/testing/vma/include/stubs.h
@@ -418,11 +418,12 @@ static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_stru
return 0;
}
-static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- struct vm_area_struct *next)
+static inline int vma_adjust_trans_huge(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ struct vm_area_struct *next)
{
+ return 0;
}
static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}
--
2.47.3