From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>,
Janosch Frank <frankja@linux.ibm.com>,
Claudio Imbrenda <imbrenda@linux.ibm.com>,
David Hildenbrand <david@redhat.com>,
Alexander Gordeev <agordeev@linux.ibm.com>,
Gerald Schaefer <gerald.schaefer@linux.ibm.com>,
Heiko Carstens <hca@linux.ibm.com>,
Vasily Gorbik <gor@linux.ibm.com>,
Sven Schnelle <svens@linux.ibm.com>, Peter Xu <peterx@redhat.com>,
Alexander Viro <viro@zeniv.linux.org.uk>,
Christian Brauner <brauner@kernel.org>, Jan Kara <jack@suse.cz>,
Arnd Bergmann <arnd@arndb.de>, Zi Yan <ziy@nvidia.com>,
Baolin Wang <baolin.wang@linux.alibaba.com>,
"Liam R . Howlett" <Liam.Howlett@oracle.com>,
Nico Pache <npache@redhat.com>,
Ryan Roberts <ryan.roberts@arm.com>, Dev Jain <dev.jain@arm.com>,
Barry Song <baohua@kernel.org>, Lance Yang <lance.yang@linux.dev>,
Muchun Song <muchun.song@linux.dev>,
Oscar Salvador <osalvador@suse.de>,
Vlastimil Babka <vbabka@suse.cz>, Mike Rapoport <rppt@kernel.org>,
Suren Baghdasaryan <surenb@google.com>,
Michal Hocko <mhocko@suse.com>,
Matthew Brost <matthew.brost@intel.com>,
Joshua Hahn <joshua.hahnjy@gmail.com>,
Rakie Kim <rakie.kim@sk.com>, Byungchul Park <byungchul@sk.com>,
Gregory Price <gourry@gourry.net>,
Ying Huang <ying.huang@linux.alibaba.com>,
Alistair Popple <apopple@nvidia.com>,
Axel Rasmussen <axelrasmussen@google.com>,
Yuanchu Xie <yuanchu@google.com>, Wei Xu <weixugc@google.com>,
Kemeng Shi <shikemeng@huaweicloud.com>,
Kairui Song <kasong@tencent.com>, Nhat Pham <nphamcs@gmail.com>,
Baoquan He <bhe@redhat.com>, Chris Li <chrisl@kernel.org>,
SeongJae Park <sj@kernel.org>,
Matthew Wilcox <willy@infradead.org>,
Jason Gunthorpe <jgg@ziepe.ca>, Leon Romanovsky <leon@kernel.org>,
Xu Xin <xu.xin16@zte.com.cn>,
Chengming Zhou <chengming.zhou@linux.dev>,
Jann Horn <jannh@google.com>, Miaohe Lin <linmiaohe@huawei.com>,
Naoya Horiguchi <nao.horiguchi@gmail.com>,
Pedro Falcato <pfalcato@suse.de>,
Pasha Tatashin <pasha.tatashin@soleen.com>,
Rik van Riel <riel@surriel.com>, Harry Yoo <harry.yoo@oracle.com>,
Hugh Dickins <hughd@google.com>,
linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
linux-s390@vger.kernel.org, linux-fsdevel@vger.kernel.org,
linux-mm@kvack.org, linux-arch@vger.kernel.org,
damon@lists.linux.dev
Subject: [PATCH 16/16] mm: replace remaining pte_to_swp_entry() with leafent_from_pte()
Date: Mon, 3 Nov 2025 12:31:57 +0000 [thread overview]
Message-ID: <1518bb0d0e40ace2012e64590288046aef03781e.1762171281.git.lorenzo.stoakes@oracle.com> (raw)
In-Reply-To: <cover.1762171281.git.lorenzo.stoakes@oracle.com>
There are straggler invocations of pte_to_swp_entry() lying around; replace
all of these with the leaf entry equivalent - leafent_from_pte().
With those removed, eliminate pte_to_swp_entry() altogether.
No functional change intended.
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
---
include/linux/leafops.h | 7 ++++++-
include/linux/swapops.h | 13 -------------
mm/debug_vm_pgtable.c | 2 +-
mm/internal.h | 7 +++++--
mm/memory-failure.c | 2 +-
mm/memory.c | 16 ++++++++--------
mm/migrate.c | 2 +-
mm/mincore.c | 4 +++-
mm/rmap.c | 8 ++++++--
mm/swapfile.c | 5 +++--
10 files changed, 34 insertions(+), 32 deletions(-)
diff --git a/include/linux/leafops.h b/include/linux/leafops.h
index b74d406ba648..ba970d4e2e17 100644
--- a/include/linux/leafops.h
+++ b/include/linux/leafops.h
@@ -66,11 +66,16 @@ static inline leaf_entry_t leafent_mk_none(void)
*/
static inline leaf_entry_t leafent_from_pte(pte_t pte)
{
+ leaf_entry_t arch_entry;
+
if (pte_present(pte))
return leafent_mk_none();
+ pte = pte_swp_clear_flags(pte);
+ arch_entry = __pte_to_swp_entry(pte);
+
/* Temporary until swp_entry_t eliminated. */
- return pte_to_swp_entry(pte);
+ return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
/**
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 3d02b288c15e..8cfc966eae48 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -107,19 +107,6 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
return entry.val & SWP_OFFSET_MASK;
}
-/*
- * Convert the arch-dependent pte representation of a swp_entry_t into an
- * arch-independent swp_entry_t.
- */
-static inline swp_entry_t pte_to_swp_entry(pte_t pte)
-{
- swp_entry_t arch_entry;
-
- pte = pte_swp_clear_flags(pte);
- arch_entry = __pte_to_swp_entry(pte);
- return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
-}
-
/*
* Convert the arch-independent representation of a swp_entry_t into the
* arch-dependent pte representation.
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 181fa2b25625..4526be294ecf 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -1229,7 +1229,7 @@ static int __init init_args(struct pgtable_debug_args *args)
init_fixed_pfns(args);
/* See generic_max_swapfile_size(): probe the maximum offset */
- max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));
+ max_swap_offset = swp_offset(leafent_from_pte(leafent_to_pte(swp_entry(0, ~0UL))));
/* Create a swp entry with all possible bits set while still being swap. */
args->swp_entry = swp_entry(MAX_SWAPFILES - 1, max_swap_offset);
/* Create a non-present migration entry. */
diff --git a/mm/internal.h b/mm/internal.h
index e450a34c37dd..0af87f6c2889 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -334,7 +334,7 @@ unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
*/
static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
{
- swp_entry_t entry = pte_to_swp_entry(pte);
+ const leaf_entry_t entry = leafent_from_pte(pte);
pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
(swp_offset(entry) + delta)));
@@ -389,11 +389,14 @@ static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
cgroup_id = lookup_swap_cgroup_id(entry);
while (ptep < end_ptep) {
+ leaf_entry_t entry;
+
pte = ptep_get(ptep);
if (!pte_same(pte, expected_pte))
break;
- if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
+ entry = leafent_from_pte(pte);
+ if (lookup_swap_cgroup_id(entry) != cgroup_id)
break;
expected_pte = pte_next_swp_offset(expected_pte);
ptep++;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 42cd4079c660..0e64d070d27d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -50,7 +50,7 @@
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/slab.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
diff --git a/mm/memory.c b/mm/memory.c
index f7b837c3c4dd..1c66ee83a7ab 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1218,7 +1218,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
spinlock_t *src_ptl, *dst_ptl;
int progress, max_nr, ret = 0;
int rss[NR_MM_COUNTERS];
- swp_entry_t entry = (swp_entry_t){0};
+ leaf_entry_t entry = leafent_mk_none();
struct folio *prealloc = NULL;
int nr;
@@ -1282,7 +1282,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
dst_vma, src_vma,
addr, rss);
if (ret == -EIO) {
- entry = pte_to_swp_entry(ptep_get(src_pte));
+ entry = leafent_from_pte(ptep_get(src_pte));
break;
} else if (ret == -EBUSY) {
break;
@@ -4456,13 +4456,13 @@ static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct folio *folio;
- swp_entry_t entry;
+ leaf_entry_t entry;
folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
if (!folio)
return NULL;
- entry = pte_to_swp_entry(vmf->orig_pte);
+ entry = leafent_from_pte(vmf->orig_pte);
if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
GFP_KERNEL, entry)) {
folio_put(folio);
@@ -4480,7 +4480,7 @@ static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
{
unsigned long addr;
- swp_entry_t entry;
+ leaf_entry_t entry;
int idx;
pte_t pte;
@@ -4490,7 +4490,7 @@ static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
return false;
- entry = pte_to_swp_entry(pte);
+ entry = leafent_from_pte(pte);
if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
return false;
@@ -4536,7 +4536,7 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
unsigned long orders;
struct folio *folio;
unsigned long addr;
- swp_entry_t entry;
+ leaf_entry_t entry;
spinlock_t *ptl;
pte_t *pte;
gfp_t gfp;
@@ -4557,7 +4557,7 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
if (!zswap_never_enabled())
goto fallback;
- entry = pte_to_swp_entry(vmf->orig_pte);
+ entry = leafent_from_pte(vmf->orig_pte);
/*
* Get a list of all the (large) orders below PMD_ORDER that are enabled
* and suitable for swapping THP.
diff --git a/mm/migrate.c b/mm/migrate.c
index 22e52e90cb21..567dfae4d9f8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -534,7 +534,7 @@ void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, p
* lock release in migration_entry_wait_on_locked().
*/
hugetlb_vma_unlock_read(vma);
- migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
+ migration_entry_wait_on_locked(entry, ptl);
return;
}
diff --git a/mm/mincore.c b/mm/mincore.c
index a1f48df5564e..a6194bbc0c25 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -202,7 +202,9 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
for (i = 0; i < step; i++)
vec[i] = 1;
} else { /* pte is a swap entry */
- *vec = mincore_swap(pte_to_swp_entry(pte), false);
+ const leaf_entry_t entry = leafent_from_pte(pte);
+
+ *vec = mincore_swap(entry, false);
}
vec += step;
}
diff --git a/mm/rmap.c b/mm/rmap.c
index 061d988b6ddf..60c3cd70b6ea 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1969,7 +1969,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
if (likely(pte_present(pteval))) {
pfn = pte_pfn(pteval);
} else {
- pfn = leafent_to_pfn(pte_to_swp_entry(pteval));
+ const leaf_entry_t entry = leafent_from_pte(pteval);
+
+ pfn = leafent_to_pfn(entry);
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
}
@@ -2368,7 +2370,9 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
if (likely(pte_present(pteval))) {
pfn = pte_pfn(pteval);
} else {
- pfn = leafent_to_pfn(pte_to_swp_entry(pteval));
+ const leaf_entry_t entry = leafent_from_pte(pteval);
+
+ pfn = leafent_to_pfn(entry);
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 82a8b5d7e8d0..86721fea1aa3 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3201,8 +3201,9 @@ static int claim_swapfile(struct swap_info_struct *si, struct inode *inode)
*/
unsigned long generic_max_swapfile_size(void)
{
- return swp_offset(pte_to_swp_entry(
- swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+ const leaf_entry_t entry = swp_entry(0, ~0UL);
+
+ return swp_offset(leafent_from_pte(leafent_to_pte(entry))) + 1;
}
/* Can be overridden by an architecture for additional checks. */
--
2.51.0
next prev parent reply other threads:[~2025-11-03 12:38 UTC|newest]
Thread overview: 48+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-03 12:31 [PATCH 00/16] mm: remove is_swap_[pte, pmd]() + non-swap entries, introduce leaf entries Lorenzo Stoakes
2025-11-03 12:31 ` [PATCH 01/16] mm: correctly handle UFFD PTE markers Lorenzo Stoakes
2025-11-05 18:25 ` Vlastimil Babka
2025-11-03 12:31 ` [PATCH 02/16] mm: introduce leaf entry type and use to simplify leaf entry logic Lorenzo Stoakes
2025-11-03 17:27 ` Lorenzo Stoakes
2025-11-05 14:42 ` Gregory Price
2025-11-05 17:21 ` Jason Gunthorpe
2025-11-05 17:32 ` Lorenzo Stoakes
2025-11-05 18:16 ` Jason Gunthorpe
2025-11-05 19:54 ` Lorenzo Stoakes
2025-11-05 19:06 ` Matthew Wilcox
2025-11-05 19:25 ` Gregory Price
2025-11-05 19:52 ` Lorenzo Stoakes
2025-11-05 19:56 ` David Hildenbrand
2025-11-05 20:01 ` Gregory Price
2025-11-05 20:05 ` Lorenzo Stoakes
2025-11-05 20:11 ` David Hildenbrand (Red Hat)
2025-11-05 21:08 ` Lorenzo Stoakes
2025-11-05 21:15 ` David Hildenbrand (Red Hat)
2025-11-05 21:24 ` Lorenzo Stoakes
2025-11-05 21:29 ` David Hildenbrand (Red Hat)
2025-11-05 21:47 ` Lorenzo Stoakes
2025-11-05 19:56 ` Lorenzo Stoakes
2025-11-03 12:31 ` [PATCH 03/16] mm: avoid unnecessary uses of is_swap_pte() Lorenzo Stoakes
2025-11-03 12:31 ` [PATCH 04/16] mm: eliminate uses of is_swap_pte() when leafent_from_pte() suffices Lorenzo Stoakes
2025-11-03 12:31 ` [PATCH 05/16] mm: use leaf entries in debug pgtable + remove is_swap_pte() Lorenzo Stoakes
2025-11-03 12:31 ` [PATCH 06/16] fs/proc/task_mmu: refactor pagemap_pmd_range() Lorenzo Stoakes
2025-11-03 12:31 ` [PATCH 07/16] mm: avoid unnecessary use of is_swap_pmd() Lorenzo Stoakes
2025-11-03 12:31 ` [PATCH 08/16] mm/huge_memory: refactor copy_huge_pmd() non-present logic Lorenzo Stoakes
2025-11-03 12:31 ` [PATCH 09/16] mm/huge_memory: refactor change_huge_pmd() " Lorenzo Stoakes
2025-11-03 12:31 ` [PATCH 10/16] mm: replace pmd_to_swp_entry() with leafent_from_pmd() Lorenzo Stoakes
2025-11-03 15:01 ` kernel test robot
2025-11-03 15:14 ` Lorenzo Stoakes
2025-11-03 16:24 ` kernel test robot
2025-11-03 17:30 ` Lorenzo Stoakes
2025-11-04 0:15 ` kernel test robot
2025-11-03 12:31 ` [PATCH 11/16] mm: introduce pmd_is_huge() and use where appropriate Lorenzo Stoakes
2025-11-03 12:31 ` [PATCH 12/16] mm: remove remaining is_swap_pmd() users and is_swap_pmd() Lorenzo Stoakes
2025-11-03 12:31 ` [PATCH 13/16] mm: remove non_swap_entry() and use leaf entry helpers instead Lorenzo Stoakes
2025-11-04 6:02 ` kernel test robot
2025-11-04 6:13 ` Lorenzo Stoakes
2025-11-04 6:17 ` Lorenzo Stoakes
2025-11-03 12:31 ` [PATCH 14/16] mm: remove is_hugetlb_entry_[migration, hwpoisoned]() Lorenzo Stoakes
2025-11-03 12:31 ` [PATCH 15/16] mm: eliminate further swapops predicates Lorenzo Stoakes
2025-11-03 12:31 ` Lorenzo Stoakes [this message]
2025-11-04 1:13 ` [PATCH 00/16] mm: remove is_swap_[pte, pmd]() + non-swap entries, introduce leaf entries Andrew Morton
2025-11-05 2:41 ` Wei Yang
2025-11-05 17:33 ` Lorenzo Stoakes
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1518bb0d0e40ace2012e64590288046aef03781e.1762171281.git.lorenzo.stoakes@oracle.com \
--to=lorenzo.stoakes@oracle.com \
--cc=Liam.Howlett@oracle.com \
--cc=agordeev@linux.ibm.com \
--cc=akpm@linux-foundation.org \
--cc=apopple@nvidia.com \
--cc=arnd@arndb.de \
--cc=axelrasmussen@google.com \
--cc=baohua@kernel.org \
--cc=baolin.wang@linux.alibaba.com \
--cc=bhe@redhat.com \
--cc=borntraeger@linux.ibm.com \
--cc=brauner@kernel.org \
--cc=byungchul@sk.com \
--cc=chengming.zhou@linux.dev \
--cc=chrisl@kernel.org \
--cc=damon@lists.linux.dev \
--cc=david@redhat.com \
--cc=dev.jain@arm.com \
--cc=frankja@linux.ibm.com \
--cc=gerald.schaefer@linux.ibm.com \
--cc=gor@linux.ibm.com \
--cc=gourry@gourry.net \
--cc=harry.yoo@oracle.com \
--cc=hca@linux.ibm.com \
--cc=hughd@google.com \
--cc=imbrenda@linux.ibm.com \
--cc=jack@suse.cz \
--cc=jannh@google.com \
--cc=jgg@ziepe.ca \
--cc=joshua.hahnjy@gmail.com \
--cc=kasong@tencent.com \
--cc=kvm@vger.kernel.org \
--cc=lance.yang@linux.dev \
--cc=leon@kernel.org \
--cc=linmiaohe@huawei.com \
--cc=linux-arch@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-s390@vger.kernel.org \
--cc=matthew.brost@intel.com \
--cc=mhocko@suse.com \
--cc=muchun.song@linux.dev \
--cc=nao.horiguchi@gmail.com \
--cc=npache@redhat.com \
--cc=nphamcs@gmail.com \
--cc=osalvador@suse.de \
--cc=pasha.tatashin@soleen.com \
--cc=peterx@redhat.com \
--cc=pfalcato@suse.de \
--cc=rakie.kim@sk.com \
--cc=riel@surriel.com \
--cc=rppt@kernel.org \
--cc=ryan.roberts@arm.com \
--cc=shikemeng@huaweicloud.com \
--cc=sj@kernel.org \
--cc=surenb@google.com \
--cc=svens@linux.ibm.com \
--cc=vbabka@suse.cz \
--cc=viro@zeniv.linux.org.uk \
--cc=weixugc@google.com \
--cc=willy@infradead.org \
--cc=xu.xin16@zte.com.cn \
--cc=ying.huang@linux.alibaba.com \
--cc=yuanchu@google.com \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox