[PATCH 0/4] Some uses of folio_mk_pte()
From: Matthew Wilcox (Oracle) @ 2025-02-21 14:30 UTC
To: linux-mm; +Cc: Matthew Wilcox (Oracle), linux-arch
Building on top of my earlier series that added folio_mk_pte(), I'm
simplifying both hugetlb and THP. I dislike that we still have
pmd_mkhuge() (we shouldn't be able to create a PMD entry that isn't
huge!), and I continue to dislike the 'huge pte' concept in hugetlb,
but this is a step in the right direction.
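For context, folio_mk_pte() itself is only a thin wrapper around
pfn_pte(); its definition is visible in the context of patch 3 below:

        static inline pte_t folio_mk_pte(struct folio *folio, pgprot_t pgprot)
        {
                return pfn_pte(folio_pfn(folio), pgprot);
        }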
Matthew Wilcox (Oracle) (4):
hugetlb: Simplify make_huge_pte()
mm: Remove mk_huge_pte()
mm: Add folio_mk_pmd()
arch: Remove mk_pmd()
arch/arc/include/asm/hugepage.h | 2 --
arch/arc/include/asm/pgtable-levels.h | 1 -
arch/arm/include/asm/pgtable-3level.h | 1 -
arch/arm64/include/asm/pgtable.h | 1 -
arch/loongarch/include/asm/pgtable.h | 1 -
arch/loongarch/mm/pgtable.c | 9 ---------
arch/mips/include/asm/pgtable.h | 3 ---
arch/mips/mm/pgtable-32.c | 10 ----------
arch/mips/mm/pgtable-64.c | 9 ---------
arch/powerpc/include/asm/book3s/64/pgtable.h | 1 -
arch/powerpc/mm/book3s64/pgtable.c | 5 -----
arch/riscv/include/asm/pgtable-64.h | 2 --
arch/s390/include/asm/pgtable.h | 1 -
arch/sparc/include/asm/pgtable_64.h | 1 -
arch/x86/include/asm/pgtable.h | 2 --
fs/dax.c | 3 +--
include/asm-generic/hugetlb.h | 5 -----
include/linux/huge_mm.h | 2 --
include/linux/mm.h | 17 +++++++++++++++++
mm/debug_vm_pgtable.c | 18 +++++-------------
mm/huge_memory.c | 11 +++++------
mm/hugetlb.c | 18 ++++++++----------
mm/khugepaged.c | 2 +-
mm/memory.c | 2 +-
24 files changed, 38 insertions(+), 89 deletions(-)
--
2.47.2
[PATCH 1/4] hugetlb: Simplify make_huge_pte()
From: Matthew Wilcox (Oracle) @ 2025-02-21 14:30 UTC
To: linux-mm; +Cc: Matthew Wilcox (Oracle), linux-arch
mk_huge_pte() is a bad API. Despite its name, it creates a normal
PTE, which is only later transformed into a huge PTE by
arch_make_huge_pte(). So replace the page argument with a folio
argument and call folio_mk_pte() instead. Then, because we now know
this is a regular PTE rather than a huge one, use pte_mkdirty()
instead of huge_pte_mkdirty() (and similar functions).
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/hugetlb.c | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 163190e89ea1..1ea42dd01012 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5162,18 +5162,16 @@ const struct vm_operations_struct hugetlb_vm_ops = {
.pagesize = hugetlb_vm_op_pagesize,
};
-static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
+static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
bool try_mkwrite)
{
- pte_t entry;
+ pte_t entry = folio_mk_pte(folio, vma->vm_page_prot);
unsigned int shift = huge_page_shift(hstate_vma(vma));
if (try_mkwrite && (vma->vm_flags & VM_WRITE)) {
- entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
- vma->vm_page_prot)));
+ entry = pte_mkwrite_novma(pte_mkdirty(entry));
} else {
- entry = huge_pte_wrprotect(mk_huge_pte(page,
- vma->vm_page_prot));
+ entry = pte_wrprotect(entry);
}
entry = pte_mkyoung(entry);
entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
@@ -5228,7 +5226,7 @@ static void
hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
struct folio *new_folio, pte_t old, unsigned long sz)
{
- pte_t newpte = make_huge_pte(vma, &new_folio->page, true);
+ pte_t newpte = make_huge_pte(vma, new_folio, true);
__folio_mark_uptodate(new_folio);
hugetlb_add_new_anon_rmap(new_folio, vma, addr);
@@ -5978,7 +5976,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
spin_lock(vmf->ptl);
vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
- pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
+ pte_t newpte = make_huge_pte(vma, new_folio, !unshare);
/* Break COW or unshare */
huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
@@ -6258,7 +6256,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
else
hugetlb_add_file_rmap(folio);
- new_pte = make_huge_pte(vma, &folio->page, vma->vm_flags & VM_SHARED);
+ new_pte = make_huge_pte(vma, folio, vma->vm_flags & VM_SHARED);
/*
* If this pte was previously wr-protected, keep it wr-protected even
* if populated.
@@ -6743,7 +6741,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
* For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
* with wp flag set, don't set pte write bit.
*/
- _dst_pte = make_huge_pte(dst_vma, &folio->page,
+ _dst_pte = make_huge_pte(dst_vma, folio,
!wp_enabled && !(is_continue && !vm_shared));
/*
* Always mark UFFDIO_COPY page dirty; note that this may not be
--
2.47.2
[PATCH 2/4] mm: Remove mk_huge_pte()
From: Matthew Wilcox (Oracle) @ 2025-02-21 14:30 UTC
To: linux-mm; +Cc: Matthew Wilcox (Oracle), linux-arch
The only remaining user of mk_huge_pte() is the debug code, so remove
the API and replace its use with pfn_pte(), which lets us drop the
conversion from pfn to page. We should always call arch_make_huge_pte()
to turn this PTE into a huge PTE before operating on it with
huge_pte_mkdirty() and friends.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/asm-generic/hugetlb.h | 5 -----
mm/debug_vm_pgtable.c | 18 +++++-------------
2 files changed, 5 insertions(+), 18 deletions(-)
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index f42133dae68e..710b77a1e0e8 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -5,11 +5,6 @@
#include <linux/swap.h>
#include <linux/swapops.h>
-static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
-{
- return mk_pte(page, pgprot);
-}
-
static inline unsigned long huge_pte_write(pte_t pte)
{
return pte_write(pte);
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index bc748f700a9e..7731b238b534 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -910,26 +910,18 @@ static void __init swap_migration_tests(struct pgtable_debug_args *args)
#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
- struct page *page;
pte_t pte;
pr_debug("Validating HugeTLB basic\n");
- /*
- * Accessing the page associated with the pfn is safe here,
- * as it was previously derived from a real kernel symbol.
- */
- page = pfn_to_page(args->fixed_pmd_pfn);
- pte = mk_huge_pte(page, args->page_prot);
+ pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);
+ pte = arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS);
+#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
+ WARN_ON(!pte_huge(pte));
+#endif
WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));
-
-#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
- pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);
-
- WARN_ON(!pte_huge(arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS)));
-#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
--
2.47.2
[PATCH 3/4] mm: Add folio_mk_pmd()
From: Matthew Wilcox (Oracle) @ 2025-02-21 14:31 UTC
To: linux-mm; +Cc: Matthew Wilcox (Oracle), linux-arch
Remove five conversions from folio to page, as well as both callers
of mk_pmd() that aren't part of mk_huge_pmd(). This gets us a step
closer to removing the confusion between mk_pmd(), mk_huge_pmd() and
pmd_mkhuge().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
fs/dax.c | 3 +--
include/linux/mm.h | 17 +++++++++++++++++
mm/huge_memory.c | 11 +++++------
mm/khugepaged.c | 2 +-
mm/memory.c | 2 +-
5 files changed, 25 insertions(+), 10 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 21b47402b3dc..22efc6c44539 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1237,8 +1237,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
mm_inc_nr_ptes(vma->vm_mm);
}
- pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);
- pmd_entry = pmd_mkhuge(pmd_entry);
+ pmd_entry = folio_mk_pmd(zero_folio, vmf->vma->vm_page_prot);
set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
spin_unlock(ptl);
trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b1e311bae6b7..5c883c619fa4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1936,7 +1936,24 @@ static inline pte_t folio_mk_pte(struct folio *folio, pgprot_t pgprot)
{
return pfn_pte(folio_pfn(folio), pgprot);
}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * folio_mk_pmd - Create a PMD for this folio
+ * @folio: The folio to create a PMD for
+ * @pgprot: The page protection bits to use
+ *
+ * Create a page table entry for the first page of this folio.
+ * This is suitable for passing to set_pmd_at().
+ *
+ * Return: A page table entry suitable for mapping this folio.
+ */
+static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot)
+{
+ return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
+}
#endif
+#endif /* CONFIG_MMU */
/**
* folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3d3ebdc002d5..95ed5dd9622b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1203,7 +1203,7 @@ static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
{
pmd_t entry;
- entry = mk_huge_pmd(&folio->page, vma->vm_page_prot);
+ entry = folio_mk_pmd(folio, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);
@@ -1311,8 +1311,7 @@ static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
pmd_t entry;
if (!pmd_none(*pmd))
return;
- entry = mk_pmd(&zero_folio->page, vma->vm_page_prot);
- entry = pmd_mkhuge(entry);
+ entry = folio_mk_pmd(zero_folio, vma->vm_page_prot);
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, haddr, pmd, entry);
mm_inc_nr_ptes(mm);
@@ -2570,12 +2569,12 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
folio_move_anon_rmap(src_folio, dst_vma);
src_folio->index = linear_page_index(dst_vma, dst_addr);
- _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
+ _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
/* Follow mremap() behavior and treat the entry dirty after the move */
_dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
} else {
src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
- _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
+ _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
}
set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
@@ -4306,7 +4305,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
entry = pmd_to_swp_entry(*pvmw->pmd);
folio_get(folio);
- pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
+ pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));
if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde);
if (is_writable_migration_entry(entry))
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 5f0be134141e..4f85597a7f64 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1239,7 +1239,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
__folio_mark_uptodate(folio);
pgtable = pmd_pgtable(_pmd);
- _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
+ _pmd = folio_mk_pmd(folio, vma->vm_page_prot);
_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
spin_lock(pmd_ptl);
diff --git a/mm/memory.c b/mm/memory.c
index ea5a58db76dd..6d1a1185c34c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5078,7 +5078,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
flush_icache_pages(vma, page, HPAGE_PMD_NR);
- entry = mk_huge_pmd(page, vma->vm_page_prot);
+ entry = folio_mk_pmd(folio, vma->vm_page_prot);
if (write)
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
--
2.47.2
[PATCH 4/4] arch: Remove mk_pmd()
From: Matthew Wilcox (Oracle) @ 2025-02-21 14:31 UTC
To: linux-mm; +Cc: Matthew Wilcox (Oracle), linux-arch
There are now no callers of mk_huge_pmd() and mk_pmd(). Remove them.
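For reference, the generic forms of the removed macros (as seen in the
hunks below) were:

        #define mk_pmd(page, prot)      pfn_pmd(page_to_pfn(page), prot)
        #define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

so any remaining out-of-tree user should convert to folio_mk_pmd(), or
open-code the pfn_pmd() form.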
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
arch/arc/include/asm/hugepage.h | 2 --
arch/arc/include/asm/pgtable-levels.h | 1 -
arch/arm/include/asm/pgtable-3level.h | 1 -
arch/arm64/include/asm/pgtable.h | 1 -
arch/loongarch/include/asm/pgtable.h | 1 -
arch/loongarch/mm/pgtable.c | 9 ---------
arch/mips/include/asm/pgtable.h | 3 ---
arch/mips/mm/pgtable-32.c | 10 ----------
arch/mips/mm/pgtable-64.c | 9 ---------
arch/powerpc/include/asm/book3s/64/pgtable.h | 1 -
arch/powerpc/mm/book3s64/pgtable.c | 5 -----
arch/riscv/include/asm/pgtable-64.h | 2 --
arch/s390/include/asm/pgtable.h | 1 -
arch/sparc/include/asm/pgtable_64.h | 1 -
arch/x86/include/asm/pgtable.h | 2 --
include/linux/huge_mm.h | 2 --
16 files changed, 51 deletions(-)
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index 8a2441670a8f..7765dc105d54 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -40,8 +40,6 @@ static inline pmd_t pte_pmd(pte_t pte)
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
-#define mk_pmd(page, prot) pte_pmd(mk_pte(page, prot))
-
#define pmd_trans_huge(pmd) (pmd_val(pmd) & _PAGE_HW_SZ)
#define pfn_pmd(pfn, prot) (__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
diff --git a/arch/arc/include/asm/pgtable-levels.h b/arch/arc/include/asm/pgtable-levels.h
index 55dbd2719e35..d1ce4b0f1071 100644
--- a/arch/arc/include/asm/pgtable-levels.h
+++ b/arch/arc/include/asm/pgtable-levels.h
@@ -142,7 +142,6 @@
#define pmd_pfn(pmd) ((pmd_val(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
#endif
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index fa5939eb9864..7b71a3d414b7 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -209,7 +209,6 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
-#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
/* No hardware dirty/accessed bits -- generic_pmdp_establish() fits */
#define pmdp_establish generic_pmdp_establish
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 64b469d799c6..5c0c184bf044 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -607,7 +607,6 @@ static inline pmd_t pmd_mkspecial(pmd_t pmd)
#define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
#define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
#define pud_young(pud) pte_young(pud_pte(pud))
#define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud)))
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 9ba3a4ebcd98..a3f17914dbab 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -255,7 +255,6 @@ static inline void pmd_clear(pmd_t *pmdp)
#define pmd_page_vaddr(pmd) pmd_val(pmd)
-extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);
#define pte_page(x) pfn_to_page(pte_pfn(x))
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index 22a94bb3e6e8..352d9b2e02ab 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -135,15 +135,6 @@ void kernel_pte_init(void *addr)
} while (p != end);
}
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
-{
- pmd_t pmd;
-
- pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
-
- return pmd;
-}
-
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index d69cfa5a8ac6..4852b005a72d 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -713,9 +713,6 @@ static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
-/* Extern to avoid header file madness */
-extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
-
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index 84dd5136d53a..e2cf2166d5cb 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -31,16 +31,6 @@ void pgd_init(void *addr)
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
-{
- pmd_t pmd;
-
- pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
-
- return pmd;
-}
-
-
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 1e544827dea9..b24f865de357 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -90,15 +90,6 @@ void pud_init(void *addr)
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-pmd_t mk_pmd(struct page *page, pgprot_t prot)
-{
- pmd_t pmd;
-
- pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
-
- return pmd;
-}
-
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 6d98e6f08d4d..6ed93e290c2f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1096,7 +1096,6 @@ static inline bool pmd_access_permitted(pmd_t pmd, bool write)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot);
-extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern pud_t pud_modify(pud_t pud, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index ce64abea9e3e..81a1fa7dcc23 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -270,11 +270,6 @@ pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot)
return __pud_mkhuge(pud_set_protbits(__pud(pudv), pgprot));
}
-pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
-{
- return pfn_pmd(page_to_pfn(page), pgprot);
-}
-
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
unsigned long pmdv;
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 0897dd99ab8d..188fadc1c21f 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -262,8 +262,6 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
return __page_val_to_pfn(pmd_val(pmd));
}
-#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)
-
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index db932beabc87..7d8d8793259a 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1870,7 +1870,6 @@ static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
#define pmdp_collapse_flush pmdp_collapse_flush
#define pfn_pmd(pfn, pgprot) mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
-#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
static inline int pmd_trans_huge(pmd_t pmd)
{
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index ac115adb488f..5a6dc10486de 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -233,7 +233,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
return __pmd(pte_val(pte));
}
-#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
#endif
/* This one can be done with two shifts. */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 9f480bdafd20..d09d17cff956 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1347,8 +1347,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
-#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
-
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 93e509b6c00e..78341bb40351 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -461,8 +461,6 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);
-#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
-
static inline bool thp_migration_supported(void)
{
return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
--
2.47.2