From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Suren Baghdasaryan <surenb@google.com>,
Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
Pedro Falcato <pfalcato@suse.de>,
David Hildenbrand <david@redhat.com>,
Vlastimil Babka <vbabka@suse.cz>, Michal Hocko <mhocko@suse.com>,
Jann Horn <jannh@google.com>,
shikemeng@huaweicloud.com, kasong@tencent.com, nphamcs@gmail.com,
bhe@redhat.com, baohua@kernel.org, chrisl@kernel.org,
Matthew Wilcox <willy@infradead.org>,
"Liam R. Howlett" <Liam.Howlett@oracle.com>
Subject: [PATCH v2 10/10] mm: Use unmap_desc struct for freeing page tables.
Date: Thu, 15 Jan 2026 13:27:20 -0500 [thread overview]
Message-ID: <20260115182720.1691130-11-Liam.Howlett@oracle.com> (raw)
In-Reply-To: <20260115182720.1691130-1-Liam.Howlett@oracle.com>
Pass through the unmap_desc to free_pgtables() because it almost has
everything necessary and is already on the stack.
Update the testing code as necessary.
No functional changes intended.
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
mm/internal.h | 5 +----
mm/memory.c | 37 ++++++++++++++------------------
mm/mmap.c | 6 +++---
mm/vma.c | 6 ++----
tools/testing/vma/vma_internal.h | 7 +++---
5 files changed, 25 insertions(+), 36 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 25a17eea550b8..1cad630f0dcef 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -512,10 +512,7 @@ bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);
-void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
- struct vm_area_struct *vma, unsigned long pg_start,
- unsigned long pg_end, unsigned long vma_end,
- bool mm_wr_locked);
+void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
diff --git a/mm/memory.c b/mm/memory.c
index 6fd6decc139e9..16b25eff19251 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -373,12 +373,7 @@ void free_pgd_range(struct mmu_gather *tlb,
/**
* free_pgtables() - Free a range of page tables
* @tlb: The mmu gather
- * @mas: The maple state
- * @vma: The first vma
- * @pg_start: The lowest page table address (floor)
- * @pg_end: The highest page table address (ceiling)
- * @vma_end: The highest vma tree search address
- * @mm_wr_locked: boolean indicating if the mm is write locked
+ * @unmap: The unmap_desc
*
* Note: pg_start and pg_end are provided to indicate the absolute range of the
* page tables that should be removed. This can differ from the vma mappings on
@@ -388,21 +383,21 @@ void free_pgd_range(struct mmu_gather *tlb,
* The vma_end differs from the pg_end when a dup_mmap() failed and the tree has
* unrelated data to the mm_struct being torn down.
*/
-void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
- struct vm_area_struct *vma, unsigned long pg_start,
- unsigned long pg_end, unsigned long vma_end,
- bool mm_wr_locked)
+void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
struct unlink_vma_file_batch vb;
+ struct ma_state *mas = unmap->mas;
+ struct vm_area_struct *vma = unmap->first;
/*
* Note: USER_PGTABLES_CEILING may be passed as the value of pg_end and
- * may be 0. Underflow is expected in this case. Otherwise the
- * pagetable end is exclusive.
- * vma_end is exclusive.
- * The last vma address should never be larger than the pagetable end.
+ * may be 0. The underflow here is fine and expected.
+ * The vma_end is exclusive, which is fine until we use the mas_ instead
+ * of the vma iterators.
+ For freeing the page tables to make sense, the vma_end must not be
+ larger than the pg_end, so check that after the potential underflow.
*/
- WARN_ON_ONCE(vma_end - 1 > pg_end - 1);
+ WARN_ON_ONCE(unmap->vma_end - 1 > unmap->pg_end - 1);
tlb_free_vmas(tlb);
@@ -410,13 +405,13 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
unsigned long addr = vma->vm_start;
struct vm_area_struct *next;
- next = mas_find(mas, vma_end - 1);
+ next = mas_find(mas, unmap->tree_end - 1);
/*
* Hide vma from rmap and truncate_pagecache before freeing
* pgtables
*/
- if (mm_wr_locked)
+ if (unmap->mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
@@ -428,16 +423,16 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
*/
while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
vma = next;
- next = mas_find(mas, vma_end - 1);
- if (mm_wr_locked)
+ next = mas_find(mas, unmap->tree_end - 1);
+ if (unmap->mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
unlink_file_vma_batch_add(&vb, vma);
}
unlink_file_vma_batch_final(&vb);
- free_pgd_range(tlb, addr, vma->vm_end,
- pg_start, next ? next->vm_start : pg_end);
+ free_pgd_range(tlb, addr, vma->vm_end, unmap->pg_start,
+ next ? next->vm_start : unmap->pg_end);
vma = next;
} while (vma);
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 042b6b4b6ab86..8771b276d63db 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1307,10 +1307,10 @@ void exit_mmap(struct mm_struct *mm)
*/
mm_flags_set(MMF_OOM_SKIP, mm);
mmap_write_lock(mm);
+ unmap.mm_wr_locked = true;
mt_clear_in_rcu(&mm->mm_mt);
- vma_iter_set(&vmi, vma->vm_end);
- free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
- USER_PGTABLES_CEILING, USER_PGTABLES_CEILING, true);
+ vma_iter_set(&vmi, unmap.tree_reset);
+ free_pgtables(&tlb, &unmap);
tlb_finish_mmu(&tlb);
/*
diff --git a/mm/vma.c b/mm/vma.c
index 876d2db5329dd..f352d5c722126 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -475,15 +475,13 @@ void remove_vma(struct vm_area_struct *vma)
void unmap_region(struct unmap_desc *unmap)
{
struct mm_struct *mm = unmap->first->vm_mm;
- struct ma_state *mas = unmap->mas;
struct mmu_gather tlb;
tlb_gather_mmu(&tlb, mm);
update_hiwater_rss(mm);
unmap_vmas(&tlb, unmap);
- mas_set(mas, unmap->tree_reset);
- free_pgtables(&tlb, mas, unmap->first, unmap->pg_start, unmap->pg_end,
- unmap->tree_end, unmap->mm_wr_locked);
+ mas_set(unmap->mas, unmap->tree_reset);
+ free_pgtables(&tlb, unmap);
tlb_finish_mmu(&tlb);
}
diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index 0b4918aac8d6d..ca4eb563b29ba 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -1137,11 +1137,10 @@ static inline void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
}
-static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
- struct vm_area_struct *vma, unsigned long floor,
- unsigned long ceiling, unsigned long tree_max,
- bool mm_wr_locked)
+static inline void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc)
{
+ (void)tlb;
+ (void)desc;
}
static inline void mapping_unmap_writable(struct address_space *mapping)
--
2.47.3
next prev parent reply other threads:[~2026-01-15 18:29 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-15 18:27 [PATCH v2 00/10] Remove XA_ZERO from error recovery of dup_mmap() Liam R. Howlett
2026-01-15 18:27 ` [PATCH v2 01/10] mm/mmap: Move exit_mmap() trace point Liam R. Howlett
2026-01-15 18:27 ` [PATCH v2 02/10] mm/mmap: Abstract vma clean up from exit_mmap() Liam R. Howlett
2026-01-15 18:27 ` [PATCH v2 03/10] mm/vma: Add limits to unmap_region() for vmas Liam R. Howlett
2026-01-15 18:27 ` [PATCH v2 04/10] mm/memory: Add tree limit to free_pgtables() Liam R. Howlett
2026-01-15 18:27 ` [PATCH v2 05/10] mm/vma: Add page table limit to unmap_region() Liam R. Howlett
2026-01-15 18:27 ` [PATCH v2 06/10] mm: Change dup_mmap() recovery Liam R. Howlett
2026-01-15 18:27 ` [PATCH v2 07/10] mm: Introduce unmap_desc struct to reduce function arguments Liam R. Howlett
2026-01-15 18:27 ` [PATCH v2 08/10] mm/vma: Use unmap_desc in exit_mmap() and vms_clear_ptes() Liam R. Howlett
2026-01-16 1:30 ` kernel test robot
2026-01-16 17:55 ` Liam R. Howlett
2026-01-16 19:48 ` Liam R. Howlett
2026-01-16 2:04 ` kernel test robot
2026-01-16 8:27 ` Lorenzo Stoakes
2026-01-16 16:15 ` Liam R. Howlett
2026-01-17 1:07 ` SeongJae Park
2026-01-17 3:25 ` Andrew Morton
2026-01-15 18:27 ` [PATCH v2 09/10] mm/vma: Use unmap_region() in vms_clear_ptes() Liam R. Howlett
2026-01-15 18:27 ` Liam R. Howlett [this message]
2026-01-15 19:01 ` [PATCH v2 00/10] Remove XA_ZERO from error recovery of dup_mmap() Andrew Morton
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260115182720.1691130-11-Liam.Howlett@oracle.com \
--to=liam.howlett@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=baohua@kernel.org \
--cc=bhe@redhat.com \
--cc=chrisl@kernel.org \
--cc=david@redhat.com \
--cc=jannh@google.com \
--cc=kasong@tencent.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=mhocko@suse.com \
--cc=nphamcs@gmail.com \
--cc=pfalcato@suse.de \
--cc=shikemeng@huaweicloud.com \
--cc=surenb@google.com \
--cc=vbabka@suse.cz \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox