linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v3 0/4] arch, mm: consolidate empty_zero_page
@ 2026-02-11 10:31 Mike Rapoport
  2026-02-11 10:31 ` [PATCH v3 1/4] mm: don't special case !MMU for is_zero_pfn() and my_zero_pfn() Mike Rapoport
                   ` (3 more replies)
  0 siblings, 4 replies; 17+ messages in thread
From: Mike Rapoport @ 2026-02-11 10:31 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Andreas Larsson, Borislav Petkov, Brian Cain, Catalin Marinas,
	Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, David Hildenbrand, Dinh Nguyen,
	Geert Uytterhoeven, Guo Ren, Helge Deller, Huacai Chen,
	Ingo Molnar, Johannes Berg, John Paul Adrian Glaubitz,
	Liam R. Howlett, Lorenzo Stoakes, Madhavan Srinivasan,
	Magnus Lindholm, Matt Turner, Max Filippov, Michael Ellerman,
	Michal Hocko, Michal Simek, Mike Rapoport, Palmer Dabbelt,
	Richard Weinberger, Russell King, Stafford Horne,
	Suren Baghdasaryan, Thomas Gleixner, Vineet Gupta,
	Vlastimil Babka, Will Deacon, linux-alpha, linux-kernel,
	linux-snps-arc, linux-arm-kernel, linux-csky, linux-hexagon,
	loongarch, linux-m68k, linux-openrisc, linux-parisc,
	linuxppc-dev, linux-riscv, linux-sh, sparclinux, linux-um,
	linux-mm, x86

From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>

Hi,

These patches clean up the handling of ZERO_PAGE() and zero_pfn.

v3 changes:
* move zero_page_pfn extern declaration back inside the helpers
* add tags, thanks all!

v2: https://lore.kernel.org/all/20260209144058.2092871-1-rppt@kernel.org
* add patches that clean up the zero_pfn code a bit (patches 1,2)
* add a patch that caches struct page for empty_zero_page (patch 4)
* use uint8_t instead of unsigned long for empty_zero_page
* fix sparc64 changes in patch 3

v1: https://lore.kernel.org/all/20260124095628.668870-1-rppt@kernel.org

Mike Rapoport (Microsoft) (4):
  mm: don't special case !MMU for is_zero_pfn() and my_zero_pfn()
  mm: rename my_zero_pfn() to zero_pfn()
  arch, mm: consolidate empty_zero_page
  mm: cache struct page for empty_zero_page and return it from ZERO_PAGE()

 arch/alpha/include/asm/pgtable.h          |  6 ---
 arch/arc/include/asm/pgtable.h            |  3 --
 arch/arc/mm/init.c                        |  2 -
 arch/arm/include/asm/pgtable.h            |  9 -----
 arch/arm/mm/mmu.c                         |  7 ----
 arch/arm/mm/nommu.c                       |  7 ----
 arch/arm64/include/asm/pgtable.h          |  7 ----
 arch/arm64/mm/init.c                      |  5 +++
 arch/arm64/mm/mmu.c                       |  7 ----
 arch/csky/include/asm/pgtable.h           |  3 --
 arch/csky/mm/init.c                       |  3 --
 arch/hexagon/include/asm/pgtable.h        |  6 ---
 arch/hexagon/kernel/head.S                |  5 ---
 arch/hexagon/kernel/hexagon_ksyms.c       |  1 -
 arch/loongarch/include/asm/pgtable.h      |  9 -----
 arch/loongarch/mm/init.c                  |  3 --
 arch/m68k/include/asm/pgtable_mm.h        |  9 -----
 arch/m68k/include/asm/pgtable_no.h        |  7 ----
 arch/m68k/mm/init.c                       |  9 -----
 arch/m68k/mm/mcfmmu.c                     |  2 -
 arch/m68k/mm/motorola.c                   |  6 ---
 arch/m68k/mm/sun3mmu.c                    |  2 -
 arch/microblaze/include/asm/pgtable.h     | 10 -----
 arch/microblaze/kernel/head.S             |  4 --
 arch/microblaze/kernel/microblaze_ksyms.c |  2 -
 arch/mips/mm/init.c                       | 11 +-----
 arch/nios2/include/asm/pgtable.h          |  7 ----
 arch/nios2/kernel/head.S                  | 10 -----
 arch/nios2/kernel/nios2_ksyms.c           |  1 -
 arch/openrisc/include/asm/pgtable.h       |  4 --
 arch/openrisc/kernel/head.S               |  3 --
 arch/openrisc/kernel/or32_ksyms.c         |  1 -
 arch/openrisc/mm/init.c                   |  3 --
 arch/parisc/include/asm/pgtable.h         | 11 ------
 arch/parisc/mm/init.c                     |  6 ---
 arch/powerpc/include/asm/pgtable.h        |  6 ---
 arch/powerpc/mm/mem.c                     |  3 --
 arch/riscv/include/asm/pgtable.h          |  7 ----
 arch/riscv/mm/init.c                      |  4 --
 arch/s390/mm/init.c                       |  4 +-
 arch/sh/include/asm/pgtable.h             |  8 ----
 arch/sh/include/asm/setup.h               |  3 +-
 arch/sh/kernel/head_32.S                  |  4 +-
 arch/sh/kernel/sh_ksyms_32.c              |  1 -
 arch/sh/mm/init.c                         |  1 -
 arch/sparc/include/asm/pgtable_32.h       |  8 ----
 arch/sparc/include/asm/pgtable_64.h       |  3 --
 arch/sparc/include/asm/setup.h            |  2 -
 arch/sparc/kernel/head_32.S               |  7 ----
 arch/sparc/mm/init_32.c                   |  4 --
 arch/sparc/mm/init_64.c                   | 24 +++++-------
 arch/um/include/asm/pgtable.h             |  9 -----
 arch/um/include/shared/kern_util.h        |  1 -
 arch/um/kernel/mem.c                      | 16 --------
 arch/um/kernel/um_arch.c                  |  1 -
 arch/x86/include/asm/pgtable.h            |  8 ----
 arch/x86/kernel/head_32.S                 |  4 --
 arch/x86/kernel/head_64.S                 |  7 ----
 arch/x86/kvm/mmu/spte.h                   |  2 +-
 arch/xtensa/include/asm/pgtable.h         |  4 --
 arch/xtensa/kernel/head.S                 |  3 --
 arch/xtensa/kernel/xtensa_ksyms.c         |  2 -
 fs/dax.c                                  |  2 +-
 fs/proc/vmcore.c                          |  2 +-
 include/linux/pgtable.h                   | 48 ++++++++++++++---------
 mm/huge_memory.c                          |  2 +-
 mm/memory.c                               | 15 +------
 mm/migrate.c                              |  2 +-
 mm/mm_init.c                              | 28 +++++++++++++
 mm/userfaultfd.c                          |  4 +-
 70 files changed, 86 insertions(+), 354 deletions(-)


base-commit: 50c7f34c5c7403a12003c6759f6f6ca9a5a10872
--
2.51.0

*** BLURB HERE ***

Mike Rapoport (Microsoft) (4):
  mm: don't special case !MMU for is_zero_pfn() and my_zero_pfn()
  mm: rename my_zero_pfn() to zero_pfn()
  arch, mm: consolidate empty_zero_page
  mm: cache struct page for empty_zero_page and return it from
    ZERO_PAGE()

 arch/alpha/include/asm/pgtable.h          |  6 ---
 arch/arc/include/asm/pgtable.h            |  3 --
 arch/arc/mm/init.c                        |  2 -
 arch/arm/include/asm/pgtable.h            |  9 ----
 arch/arm/mm/mmu.c                         |  7 ----
 arch/arm/mm/nommu.c                       |  7 ----
 arch/arm64/include/asm/pgtable.h          |  7 ----
 arch/arm64/mm/init.c                      |  5 +++
 arch/arm64/mm/mmu.c                       |  7 ----
 arch/csky/include/asm/pgtable.h           |  3 --
 arch/csky/mm/init.c                       |  3 --
 arch/hexagon/include/asm/pgtable.h        |  6 ---
 arch/hexagon/kernel/head.S                |  5 ---
 arch/hexagon/kernel/hexagon_ksyms.c       |  1 -
 arch/loongarch/include/asm/pgtable.h      |  9 ----
 arch/loongarch/mm/init.c                  |  3 --
 arch/m68k/include/asm/pgtable_mm.h        |  9 ----
 arch/m68k/include/asm/pgtable_no.h        |  7 ----
 arch/m68k/mm/init.c                       |  9 ----
 arch/m68k/mm/mcfmmu.c                     |  2 -
 arch/m68k/mm/motorola.c                   |  6 ---
 arch/m68k/mm/sun3mmu.c                    |  2 -
 arch/microblaze/include/asm/pgtable.h     | 10 -----
 arch/microblaze/kernel/head.S             |  4 --
 arch/microblaze/kernel/microblaze_ksyms.c |  2 -
 arch/mips/mm/init.c                       | 11 +----
 arch/nios2/include/asm/pgtable.h          |  7 ----
 arch/nios2/kernel/head.S                  | 10 -----
 arch/nios2/kernel/nios2_ksyms.c           |  1 -
 arch/openrisc/include/asm/pgtable.h       |  4 --
 arch/openrisc/kernel/head.S               |  3 --
 arch/openrisc/kernel/or32_ksyms.c         |  1 -
 arch/openrisc/mm/init.c                   |  3 --
 arch/parisc/include/asm/pgtable.h         | 11 -----
 arch/parisc/mm/init.c                     |  6 ---
 arch/powerpc/include/asm/pgtable.h        |  6 ---
 arch/powerpc/mm/mem.c                     |  3 --
 arch/riscv/include/asm/pgtable.h          |  7 ----
 arch/riscv/mm/init.c                      |  4 --
 arch/s390/mm/init.c                       |  4 +-
 arch/sh/include/asm/pgtable.h             |  8 ----
 arch/sh/include/asm/setup.h               |  3 +-
 arch/sh/kernel/head_32.S                  |  4 +-
 arch/sh/kernel/sh_ksyms_32.c              |  1 -
 arch/sh/mm/init.c                         |  1 -
 arch/sparc/include/asm/pgtable_32.h       |  8 ----
 arch/sparc/include/asm/pgtable_64.h       |  3 --
 arch/sparc/include/asm/setup.h            |  2 -
 arch/sparc/kernel/head_32.S               |  7 ----
 arch/sparc/mm/init_32.c                   |  4 --
 arch/sparc/mm/init_64.c                   | 24 ++++-------
 arch/um/include/asm/pgtable.h             |  9 ----
 arch/um/include/shared/kern_util.h        |  1 -
 arch/um/kernel/mem.c                      | 16 -------
 arch/um/kernel/um_arch.c                  |  1 -
 arch/x86/include/asm/pgtable.h            |  8 ----
 arch/x86/kernel/head_32.S                 |  4 --
 arch/x86/kernel/head_64.S                 |  7 ----
 arch/x86/kvm/mmu/spte.h                   |  2 +-
 arch/xtensa/include/asm/pgtable.h         |  4 --
 arch/xtensa/kernel/head.S                 |  3 --
 arch/xtensa/kernel/xtensa_ksyms.c         |  2 -
 fs/dax.c                                  |  2 +-
 fs/proc/vmcore.c                          |  2 +-
 include/linux/pgtable.h                   | 51 +++++++++++++++--------
 mm/huge_memory.c                          |  2 +-
 mm/memory.c                               | 15 +------
 mm/migrate.c                              |  2 +-
 mm/mm_init.c                              | 28 +++++++++++++
 mm/userfaultfd.c                          |  4 +-
 70 files changed, 89 insertions(+), 354 deletions(-)


base-commit: 50c7f34c5c7403a12003c6759f6f6ca9a5a10872
--
2.51.0


^ permalink raw reply	[flat|nested] 17+ messages in thread

* [PATCH v3 1/4] mm: don't special case !MMU for is_zero_pfn() and my_zero_pfn()
  2026-02-11 10:31 [PATCH v3 0/4] arch, mm: consolidate empty_zero_page Mike Rapoport
@ 2026-02-11 10:31 ` Mike Rapoport
  2026-02-12  8:58   ` David Hildenbrand (Arm)
  2026-02-12 18:30   ` Liam R. Howlett
  2026-02-11 10:31 ` [PATCH v3 2/4] mm: rename my_zero_pfn() to zero_pfn() Mike Rapoport
                   ` (2 subsequent siblings)
  3 siblings, 2 replies; 17+ messages in thread
From: Mike Rapoport @ 2026-02-11 10:31 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Andreas Larsson, Borislav Petkov, Brian Cain, Catalin Marinas,
	Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, David Hildenbrand, Dinh Nguyen,
	Geert Uytterhoeven, Guo Ren, Helge Deller, Huacai Chen,
	Ingo Molnar, Johannes Berg, John Paul Adrian Glaubitz,
	Liam R. Howlett, Lorenzo Stoakes, Madhavan Srinivasan,
	Magnus Lindholm, Matt Turner, Max Filippov, Michael Ellerman,
	Michal Hocko, Michal Simek, Mike Rapoport, Palmer Dabbelt,
	Richard Weinberger, Russell King, Stafford Horne,
	Suren Baghdasaryan, Thomas Gleixner, Vineet Gupta,
	Vlastimil Babka, Will Deacon, linux-alpha, linux-kernel,
	linux-snps-arc, linux-arm-kernel, linux-csky, linux-hexagon,
	loongarch, linux-m68k, linux-openrisc, linux-parisc,
	linuxppc-dev, linux-riscv, linux-sh, sparclinux, linux-um,
	linux-mm, x86

From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>

nommu architectures have empty_zero_page and define ZERO_PAGE() and
although they don't really use it to populate page tables, there is no
reason to hardwire !MMU implementation of is_zero_pfn() and my_zero_pfn()
to 0.

Drop #ifdef CONFIG_MMU around implementations of is_zero_pfn() and
my_zero_pfn() and remove !MMU version.

While at it, make zero_pfn __ro_after_init.

Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
---
 include/linux/pgtable.h | 14 +-------------
 mm/memory.c             | 13 -------------
 mm/mm_init.c            | 10 ++++++++++
 3 files changed, 11 insertions(+), 26 deletions(-)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 827dca25c0bc..08a88b0d56e5 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1882,7 +1882,6 @@ static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
 	pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
 }
 
-#ifdef CONFIG_MMU
 #ifdef __HAVE_COLOR_ZERO_PAGE
 static inline int is_zero_pfn(unsigned long pfn)
 {
@@ -1905,18 +1904,7 @@ static inline unsigned long my_zero_pfn(unsigned long addr)
 	extern unsigned long zero_pfn;
 	return zero_pfn;
 }
-#endif
-#else
-static inline int is_zero_pfn(unsigned long pfn)
-{
-	return 0;
-}
-
-static inline unsigned long my_zero_pfn(unsigned long addr)
-{
-	return 0;
-}
-#endif /* CONFIG_MMU */
+#endif /* __HAVE_COLOR_ZERO_PAGE */
 
 #ifdef CONFIG_MMU
 
diff --git a/mm/memory.c b/mm/memory.c
index 187f16b7e996..51d2018a387a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -162,21 +162,8 @@ static int __init disable_randmaps(char *s)
 }
 __setup("norandmaps", disable_randmaps);
 
-unsigned long zero_pfn __read_mostly;
-EXPORT_SYMBOL(zero_pfn);
-
 unsigned long highest_memmap_pfn __read_mostly;
 
-/*
- * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
- */
-static int __init init_zero_pfn(void)
-{
-	zero_pfn = page_to_pfn(ZERO_PAGE(0));
-	return 0;
-}
-early_initcall(init_zero_pfn);
-
 void mm_trace_rss_stat(struct mm_struct *mm, int member)
 {
 	trace_rss_stat(mm, member);
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 1a29a719af58..dcf9eff34f83 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -53,6 +53,9 @@ EXPORT_SYMBOL(mem_map);
 void *high_memory;
 EXPORT_SYMBOL(high_memory);
 
+unsigned long zero_pfn __ro_after_init;
+EXPORT_SYMBOL(zero_pfn);
+
 #ifdef CONFIG_DEBUG_MEMORY_INIT
 int __meminitdata mminit_loglevel;
 
@@ -2667,6 +2670,13 @@ static void __init mem_init_print_info(void)
 		);
 }
 
+static int __init init_zero_pfn(void)
+{
+	zero_pfn = page_to_pfn(ZERO_PAGE(0));
+	return 0;
+}
+early_initcall(init_zero_pfn);
+
 void __init __weak arch_mm_preinit(void)
 {
 }
-- 
2.51.0



^ permalink raw reply	[flat|nested] 17+ messages in thread

* [PATCH v3 2/4] mm: rename my_zero_pfn() to zero_pfn()
  2026-02-11 10:31 [PATCH v3 0/4] arch, mm: consolidate empty_zero_page Mike Rapoport
  2026-02-11 10:31 ` [PATCH v3 1/4] mm: don't special case !MMU for is_zero_pfn() and my_zero_pfn() Mike Rapoport
@ 2026-02-11 10:31 ` Mike Rapoport
  2026-02-12  9:01   ` David Hildenbrand (Arm)
                     ` (2 more replies)
  2026-02-11 10:31 ` [PATCH v3 3/4] arch, mm: consolidate empty_zero_page Mike Rapoport
  2026-02-11 10:31 ` [PATCH v3 4/4] mm: cache struct page for empty_zero_page and return it from ZERO_PAGE() Mike Rapoport
  3 siblings, 3 replies; 17+ messages in thread
From: Mike Rapoport @ 2026-02-11 10:31 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Andreas Larsson, Borislav Petkov, Brian Cain, Catalin Marinas,
	Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, David Hildenbrand, Dinh Nguyen,
	Geert Uytterhoeven, Guo Ren, Helge Deller, Huacai Chen,
	Ingo Molnar, Johannes Berg, John Paul Adrian Glaubitz,
	Liam R. Howlett, Lorenzo Stoakes, Madhavan Srinivasan,
	Magnus Lindholm, Matt Turner, Max Filippov, Michael Ellerman,
	Michal Hocko, Michal Simek, Mike Rapoport, Palmer Dabbelt,
	Richard Weinberger, Russell King, Stafford Horne,
	Suren Baghdasaryan, Thomas Gleixner, Vineet Gupta,
	Vlastimil Babka, Will Deacon, linux-alpha, linux-kernel,
	linux-snps-arc, linux-arm-kernel, linux-csky, linux-hexagon,
	loongarch, linux-m68k, linux-openrisc, linux-parisc,
	linuxppc-dev, linux-riscv, linux-sh, sparclinux, linux-um,
	linux-mm, x86

From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>

my_zero_pfn() is a silly name.

Rename zero_pfn variable to zero_page_pfn and my_zero_pfn() function to
zero_pfn().

While at it, move the extern declarations of zero_page_pfn outside the
functions that use it and add a comment about what ZERO_PAGE is.

Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
---
 arch/x86/kvm/mmu/spte.h |  2 +-
 fs/dax.c                |  2 +-
 fs/proc/vmcore.c        |  2 +-
 include/linux/pgtable.h | 28 ++++++++++++++++++++--------
 mm/huge_memory.c        |  2 +-
 mm/memory.c             |  2 +-
 mm/migrate.c            |  2 +-
 mm/mm_init.c            | 10 +++++-----
 mm/userfaultfd.c        |  4 ++--
 9 files changed, 33 insertions(+), 21 deletions(-)

diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 91ce29fd6f1b..8c0ffa2cded6 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -248,7 +248,7 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
 static inline hpa_t kvm_mmu_get_dummy_root(void)
 {
-	return my_zero_pfn(0) << PAGE_SHIFT;
+	return zero_pfn(0) << PAGE_SHIFT;
 }
 
 static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
diff --git a/fs/dax.c b/fs/dax.c
index 289e6254aa30..b78cff9c91b3 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1360,7 +1360,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 {
 	struct inode *inode = iter->inode;
 	unsigned long vaddr = vmf->address;
-	unsigned long pfn = my_zero_pfn(vaddr);
+	unsigned long pfn = zero_pfn(vaddr);
 	vm_fault_t ret;
 
 	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index f188bd900eb2..44d15436439f 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -525,7 +525,7 @@ static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
 {
 	unsigned long map_size;
 	unsigned long pos_start, pos_end, pos;
-	unsigned long zeropage_pfn = my_zero_pfn(0);
+	unsigned long zeropage_pfn = zero_pfn(0);
 	size_t len = 0;
 
 	pos_start = pfn;
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 08a88b0d56e5..9bacf4df9769 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1882,27 +1882,39 @@ static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
 	pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
 }
 
+/*
+ * ZERO_PAGE() is global shared page(s) that is always zero. It is used for
+ * zero-mapped memory areas, CoW etc.
+ *
+ * On architectures that __HAVE_COLOR_ZERO_PAGE there are several such pages
+ * for different ranges in the virtual address space.
+ *
+ * zero_page_pfn identifies the first (or the only) pfn for these pages.
+ */
 #ifdef __HAVE_COLOR_ZERO_PAGE
 static inline int is_zero_pfn(unsigned long pfn)
 {
-	extern unsigned long zero_pfn;
-	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+	extern unsigned long zero_page_pfn;
+	unsigned long offset_from_zero_pfn = pfn - zero_page_pfn;
+
 	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
 }
 
-#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
+#define zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
 
 #else
 static inline int is_zero_pfn(unsigned long pfn)
 {
-	extern unsigned long zero_pfn;
-	return pfn == zero_pfn;
+	extern unsigned long zero_page_pfn;
+
+	return pfn == zero_page_pfn;
 }
 
-static inline unsigned long my_zero_pfn(unsigned long addr)
+static inline unsigned long zero_pfn(unsigned long addr)
 {
-	extern unsigned long zero_pfn;
-	return zero_pfn;
+	extern unsigned long zero_page_pfn;
+
+	return zero_page_pfn;
 }
 #endif /* __HAVE_COLOR_ZERO_PAGE */
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 44ff8a648afd..bc15fd152526 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2968,7 +2968,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
 		pte_t entry;
 
-		entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
+		entry = pfn_pte(zero_pfn(addr), vma->vm_page_prot);
 		entry = pte_mkspecial(entry);
 		if (pmd_uffd_wp(old_pmd))
 			entry = pte_mkuffd_wp(entry);
diff --git a/mm/memory.c b/mm/memory.c
index 51d2018a387a..ae610afa9cea 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5165,7 +5165,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	/* Use the zero-page for reads */
 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
 			!mm_forbids_zeropage(vma->vm_mm)) {
-		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
+		entry = pte_mkspecial(pfn_pte(zero_pfn(vmf->address),
 						vma->vm_page_prot));
 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
 				vmf->address, &vmf->ptl);
diff --git a/mm/migrate.c b/mm/migrate.c
index 1bf2cf8c44dd..739c4e03769b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -321,7 +321,7 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
 	if (!pages_identical(page, ZERO_PAGE(0)))
 		return false;
 
-	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
+	newpte = pte_mkspecial(pfn_pte(zero_pfn(pvmw->address),
 					pvmw->vma->vm_page_prot));
 
 	if (pte_swp_soft_dirty(old_pte))
diff --git a/mm/mm_init.c b/mm/mm_init.c
index dcf9eff34f83..a0ca236eb4f5 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -53,8 +53,8 @@ EXPORT_SYMBOL(mem_map);
 void *high_memory;
 EXPORT_SYMBOL(high_memory);
 
-unsigned long zero_pfn __ro_after_init;
-EXPORT_SYMBOL(zero_pfn);
+unsigned long zero_page_pfn __ro_after_init;
+EXPORT_SYMBOL(zero_page_pfn);
 
 #ifdef CONFIG_DEBUG_MEMORY_INIT
 int __meminitdata mminit_loglevel;
@@ -2670,12 +2670,12 @@ static void __init mem_init_print_info(void)
 		);
 }
 
-static int __init init_zero_pfn(void)
+static int __init init_zero_page_pfn(void)
 {
-	zero_pfn = page_to_pfn(ZERO_PAGE(0));
+	zero_page_pfn = page_to_pfn(ZERO_PAGE(0));
 	return 0;
 }
-early_initcall(init_zero_pfn);
+early_initcall(init_zero_page_pfn);
 
 void __init __weak arch_mm_preinit(void)
 {
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 927086bb4a3c..e19872e51878 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -357,7 +357,7 @@ static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
 	if (mm_forbids_zeropage(dst_vma->vm_mm))
 		return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
 
-	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
+	_dst_pte = pte_mkspecial(pfn_pte(zero_pfn(dst_addr),
 					 dst_vma->vm_page_prot));
 	ret = -EAGAIN;
 	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
@@ -1229,7 +1229,7 @@ static int move_zeropage_pte(struct mm_struct *mm,
 		return -EAGAIN;
 	}
 
-	zero_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
+	zero_pte = pte_mkspecial(pfn_pte(zero_pfn(dst_addr),
 					 dst_vma->vm_page_prot));
 	ptep_clear_flush(src_vma, src_addr, src_pte);
 	set_pte_at(mm, dst_addr, dst_pte, zero_pte);
-- 
2.51.0



^ permalink raw reply	[flat|nested] 17+ messages in thread

* [PATCH v3 3/4] arch, mm: consolidate empty_zero_page
  2026-02-11 10:31 [PATCH v3 0/4] arch, mm: consolidate empty_zero_page Mike Rapoport
  2026-02-11 10:31 ` [PATCH v3 1/4] mm: don't special case !MMU for is_zero_pfn() and my_zero_pfn() Mike Rapoport
  2026-02-11 10:31 ` [PATCH v3 2/4] mm: rename my_zero_pfn() to zero_pfn() Mike Rapoport
@ 2026-02-11 10:31 ` Mike Rapoport
  2026-02-11 20:14   ` Magnus Lindholm
                     ` (4 more replies)
  2026-02-11 10:31 ` [PATCH v3 4/4] mm: cache struct page for empty_zero_page and return it from ZERO_PAGE() Mike Rapoport
  3 siblings, 5 replies; 17+ messages in thread
From: Mike Rapoport @ 2026-02-11 10:31 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Andreas Larsson, Borislav Petkov, Brian Cain, Catalin Marinas,
	Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, David Hildenbrand, Dinh Nguyen,
	Geert Uytterhoeven, Guo Ren, Helge Deller, Huacai Chen,
	Ingo Molnar, Johannes Berg, John Paul Adrian Glaubitz,
	Liam R. Howlett, Lorenzo Stoakes, Madhavan Srinivasan,
	Magnus Lindholm, Matt Turner, Max Filippov, Michael Ellerman,
	Michal Hocko, Michal Simek, Mike Rapoport, Palmer Dabbelt,
	Richard Weinberger, Russell King, Stafford Horne,
	Suren Baghdasaryan, Thomas Gleixner, Vineet Gupta,
	Vlastimil Babka, Will Deacon, linux-alpha, linux-kernel,
	linux-snps-arc, linux-arm-kernel, linux-csky, linux-hexagon,
	loongarch, linux-m68k, linux-openrisc, linux-parisc,
	linuxppc-dev, linux-riscv, linux-sh, sparclinux, linux-um,
	linux-mm, x86

From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>

Reduce 22 declarations of empty_zero_page to 3 and 23 declarations of
ZERO_PAGE() to 4.

Every architecture defines empty_zero_page one way or another, but for
most of them it is always a page aligned page in BSS and most definitions
of ZERO_PAGE do virt_to_page(empty_zero_page).

Move Linus vetted x86 definition of empty_zero_page and ZERO_PAGE() to the
core MM and drop these definitions in architectures that do not implement
colored zero page (MIPS and s390).

ZERO_PAGE() remains a macro because turning it to a wrapper for a static
inline causes severe pain in header dependencies.

For the most part the change is mechanical, with these being noteworthy:

* alpha: aliased empty_zero_page with ZERO_PGE that was also used for boot
  parameters. Switching to a generic empty_zero_page removes the aliasing
  and keeps ZERO_PGE for boot parameters only
* arm64: uses __pa_symbol() in ZERO_PAGE() so that definition of
  ZERO_PAGE() is kept intact.
* m68k/parisc/um: allocated empty_zero_page from memblock,
  although they do not support zero page coloring and having it in BSS
  will work fine.
* sparc64 can have empty_zero_page in BSS rather than allocating it, but
  it can't use virt_to_page() for BSS. Keep its definition of ZERO_PAGE()
  but instead of allocating it, make mem_map_zero point to
  empty_zero_page.
* sh: used empty_zero_page for boot parameters at the very early boot.
  Rename the parameters page to boot_params_page and let sh use the generic
  empty_zero_page.
* hexagon: had an amusing comment about empty_zero_page

	/* A handy thing to have if one has the RAM. Declared in head.S */

  that unfortunately had to go :)

Acked-by: Helge Deller <deller@gmx.de>   # parisc
Tested-by: Helge Deller <deller@gmx.de>  # parisc
Reviewed-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
---
 arch/alpha/include/asm/pgtable.h          |  6 ------
 arch/arc/include/asm/pgtable.h            |  3 ---
 arch/arc/mm/init.c                        |  2 --
 arch/arm/include/asm/pgtable.h            |  9 ---------
 arch/arm/mm/mmu.c                         |  7 -------
 arch/arm/mm/nommu.c                       |  7 -------
 arch/arm64/include/asm/pgtable.h          |  1 -
 arch/arm64/mm/mmu.c                       |  7 -------
 arch/csky/include/asm/pgtable.h           |  3 ---
 arch/csky/mm/init.c                       |  3 ---
 arch/hexagon/include/asm/pgtable.h        |  6 ------
 arch/hexagon/kernel/head.S                |  5 -----
 arch/hexagon/kernel/hexagon_ksyms.c       |  1 -
 arch/loongarch/include/asm/pgtable.h      |  9 ---------
 arch/loongarch/mm/init.c                  |  3 ---
 arch/m68k/include/asm/pgtable_mm.h        |  9 ---------
 arch/m68k/include/asm/pgtable_no.h        |  7 -------
 arch/m68k/mm/init.c                       |  9 ---------
 arch/m68k/mm/mcfmmu.c                     |  2 --
 arch/m68k/mm/motorola.c                   |  6 ------
 arch/m68k/mm/sun3mmu.c                    |  2 --
 arch/microblaze/include/asm/pgtable.h     | 10 ----------
 arch/microblaze/kernel/head.S             |  4 ----
 arch/microblaze/kernel/microblaze_ksyms.c |  2 --
 arch/nios2/include/asm/pgtable.h          |  7 -------
 arch/nios2/kernel/head.S                  | 10 ----------
 arch/nios2/kernel/nios2_ksyms.c           |  1 -
 arch/openrisc/include/asm/pgtable.h       |  4 ----
 arch/openrisc/kernel/head.S               |  3 ---
 arch/openrisc/kernel/or32_ksyms.c         |  1 -
 arch/openrisc/mm/init.c                   |  3 ---
 arch/parisc/include/asm/pgtable.h         | 11 -----------
 arch/parisc/mm/init.c                     |  6 ------
 arch/powerpc/include/asm/pgtable.h        |  6 ------
 arch/powerpc/mm/mem.c                     |  3 ---
 arch/riscv/include/asm/pgtable.h          |  7 -------
 arch/riscv/mm/init.c                      |  4 ----
 arch/sh/include/asm/pgtable.h             |  8 --------
 arch/sh/include/asm/setup.h               |  3 ++-
 arch/sh/kernel/head_32.S                  |  4 ++--
 arch/sh/kernel/sh_ksyms_32.c              |  1 -
 arch/sh/mm/init.c                         |  1 -
 arch/sparc/include/asm/pgtable_32.h       |  8 --------
 arch/sparc/include/asm/setup.h            |  2 --
 arch/sparc/kernel/head_32.S               |  7 -------
 arch/sparc/mm/init_32.c                   |  4 ----
 arch/sparc/mm/init_64.c                   | 11 ++++-------
 arch/um/include/asm/pgtable.h             |  9 ---------
 arch/um/include/shared/kern_util.h        |  1 -
 arch/um/kernel/mem.c                      | 16 ----------------
 arch/um/kernel/um_arch.c                  |  1 -
 arch/x86/include/asm/pgtable.h            |  8 --------
 arch/x86/kernel/head_32.S                 |  4 ----
 arch/x86/kernel/head_64.S                 |  7 -------
 arch/xtensa/include/asm/pgtable.h         |  4 ----
 arch/xtensa/kernel/head.S                 |  3 ---
 arch/xtensa/kernel/xtensa_ksyms.c         |  2 --
 include/linux/pgtable.h                   | 10 ++++++++++
 mm/mm_init.c                              |  5 +++++
 59 files changed, 23 insertions(+), 285 deletions(-)

diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 90e7a9539102..12a3c5f8ece8 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -125,12 +125,6 @@ struct vm_area_struct;
  */
 #define pgprot_noncached(prot)	(prot)
 
-/*
- * ZERO_PAGE is a global shared page that is always zero:  used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr)	(virt_to_page(ZERO_PGE))
-
 /*
  * On certain platforms whose physical address space can overlap KSEG,
  * namely EV6 and above, we must re-twiddle the physaddr to restore the
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index bd580e2b62d7..0fdaea81b5fa 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -21,9 +21,6 @@
 
 #ifndef __ASSEMBLER__
 
-extern char empty_zero_page[PAGE_SIZE];
-#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
-
 extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
 
 /* to cope with aliasing VIPT cache */
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index a5e92f46e5d1..d6b5c27a0098 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -19,8 +19,6 @@
 #include <asm/arcregs.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
-char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
-EXPORT_SYMBOL(empty_zero_page);
 
 static const unsigned long low_mem_start = CONFIG_LINUX_RAM_BASE;
 static unsigned long low_mem_sz;
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 6fa9acd6a7f5..982795cf4563 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -10,15 +10,6 @@
 #include <linux/const.h>
 #include <asm/proc-fns.h>
 
-#ifndef __ASSEMBLY__
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
-#endif
-
 #include <asm-generic/pgtable-nopud.h>
 
 #ifndef CONFIG_MMU
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 518def8314e7..23b87b5ef7f1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -41,13 +41,6 @@
 
 extern unsigned long __atags_pointer;
 
-/*
- * empty_zero_page is a special page that is used for
- * zero-initialized data and COW.
- */
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
-
 /*
  * The pmd table for the upper-most set of pages.
  */
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 7e42d8accec6..040ea43cce32 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -27,13 +27,6 @@
 
 unsigned long vectors_base;
 
-/*
- * empty_zero_page is a special page that is used for
- * zero-initialized data and COW.
- */
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
-
 #ifdef CONFIG_ARM_MPU
 struct mpu_rgn_info mpu_rgn_info;
 #endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index d94445b4f3df..63da07398a30 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -110,7 +110,6 @@ static inline void arch_leave_lazy_mmu_mode(void)
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))
 
 #define pte_ERROR(e)	\
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a6a00accf4f9..0f6a171faf1f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -64,13 +64,6 @@ static bool rodata_is_rw __ro_after_init = true;
  */
 long __section(".mmuoff.data.write") __early_cpu_boot_status;
 
-/*
- * Empty_zero_page is a special page that is used for zero-initialized data
- * and COW.
- */
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
-
 static DEFINE_SPINLOCK(swapper_pgdir_lock);
 static DEFINE_MUTEX(fixmap_lock);
 
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index d606afbabce1..bafcd5823531 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -76,9 +76,6 @@
 #define MAX_SWAPFILES_CHECK() \
 		BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)
 
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
-
 extern void load_pgd(unsigned long pg_dir);
 extern pte_t invalid_pte_table[PTRS_PER_PTE];
 
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index 573da66b2543..fa16015ea1c0 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -38,9 +38,6 @@ pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
 pte_t kernel_pte_tables[PTRS_KERN_TABLE] __page_aligned_bss;
 
 EXPORT_SYMBOL(invalid_pte_table);
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
-						__page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
 
 void free_initmem(void)
 {
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index fbf24d1d1ca6..27b269e2870d 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -14,9 +14,6 @@
 #include <asm/page.h>
 #include <asm-generic/pgtable-nopmd.h>
 
-/* A handy thing to have if one has the RAM. Declared in head.S */
-extern unsigned long empty_zero_page;
-
 /*
  * The PTE model described here is that of the Hexagon Virtual Machine,
  * which autonomously walks 2-level page tables.  At a lower level, we
@@ -348,9 +345,6 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 	return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
 }
 
-/* ZERO_PAGE - returns the globally shared zero page */
-#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
-
 /*
  * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
  * are !pte_none() && !pte_present().
diff --git a/arch/hexagon/kernel/head.S b/arch/hexagon/kernel/head.S
index 0b016308cc79..908ffece9132 100644
--- a/arch/hexagon/kernel/head.S
+++ b/arch/hexagon/kernel/head.S
@@ -216,8 +216,3 @@ __head_s_vaddr_target:
 .p2align PAGE_SHIFT
 ENTRY(external_cmdline_buffer)
         .fill _PAGE_SIZE,1,0
-
-.data
-.p2align PAGE_SHIFT
-ENTRY(empty_zero_page)
-        .fill _PAGE_SIZE,1,0
diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c
index 36a80e31d187..81bc6f81e200 100644
--- a/arch/hexagon/kernel/hexagon_ksyms.c
+++ b/arch/hexagon/kernel/hexagon_ksyms.c
@@ -17,7 +17,6 @@ EXPORT_SYMBOL(raw_copy_to_user);
 EXPORT_SYMBOL(__vmgetie);
 EXPORT_SYMBOL(__vmsetie);
 EXPORT_SYMBOL(__vmyield);
-EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
 
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index c33b3bcb733e..a244de27a03e 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -74,15 +74,6 @@
 struct mm_struct;
 struct vm_area_struct;
 
-/*
- * ZERO_PAGE is a global shared page that is always zero; used
- * for zero-mapped memory areas etc..
- */
-
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-
-#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)
-
 #ifdef CONFIG_32BIT
 
 #define VMALLOC_START	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index c331bf69d2ec..00f3822b6e47 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -36,9 +36,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
-
 void copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index bba64a9c49ac..7501ff030c63 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -110,15 +110,6 @@ extern unsigned long m68k_vmalloc_end;
 #define VMALLOC_END KMAP_START
 #endif
 
-/* zero page used for uninitialized stuff */
-extern void *empty_zero_page;
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
-
 extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);
 
 /*
diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h
index 1a86c15b9008..11751807a3f3 100644
--- a/arch/m68k/include/asm/pgtable_no.h
+++ b/arch/m68k/include/asm/pgtable_no.h
@@ -30,13 +30,6 @@
 
 #define swapper_pg_dir ((pgd_t *) 0)
 
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern void *empty_zero_page;
-#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 53b71f786c27..3b88c0dd1616 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -33,13 +33,6 @@
 #include <asm/sections.h>
 #include <asm/tlb.h>
 
-/*
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-void *empty_zero_page;
-EXPORT_SYMBOL(empty_zero_page);
-
 void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
 {
 	max_zone_pfns[ZONE_DMA] = PFN_DOWN(memblock_end_of_DRAM());
@@ -71,8 +64,6 @@ void __init paging_init(void)
 	unsigned long end_mem = memory_end & PAGE_MASK;
 
 	high_memory = (void *) end_mem;
-
-	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
 }
 
 #endif /* CONFIG_MMU */
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 3418fd864237..4924f2ff8ef8 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -41,8 +41,6 @@ void __init paging_init(void)
 	unsigned long next_pgtable;
 	int i;
 
-	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
-
 	pg_dir = swapper_pg_dir;
 	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 127a3fa69f4c..b30aa69a73a6 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -498,12 +498,6 @@ void __init paging_init(void)
 
 	early_memtest(min_addr, max_addr);
 
-	/*
-	 * initialize the bad page table and bad page to point
-	 * to a couple of allocated pages
-	 */
-	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
-
 	/*
 	 * Set up SFC/DFC registers
 	 */
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index c801677f7df8..f139cc15753a 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -43,8 +43,6 @@ void __init paging_init(void)
 	unsigned long bootmem_end;
 	unsigned long size;
 
-	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
-
 	address = PAGE_OFFSET;
 	pg_dir = swapper_pg_dir;
 	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 4eb76de6be4a..ea72291de553 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -207,16 +207,6 @@ extern pte_t *va_to_pte(unsigned long address);
  * Also, write permissions imply read permissions.
  */
 
-#ifndef __ASSEMBLER__
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
-#endif /* __ASSEMBLER__ */
-
 #define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
 #define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
 #define pte_clear(mm, addr, ptep) \
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index ec2fcb545e64..808019c3b7ac 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -39,10 +39,6 @@
 #include <asm/processor.h>
 
 .section .data
-.global empty_zero_page
-.align 12
-empty_zero_page:
-	.space	PAGE_SIZE
 .global swapper_pg_dir
 swapper_pg_dir:
 	.space	PAGE_SIZE
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index a8553f54152b..ad7596d7ba07 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -33,8 +33,6 @@ EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
 #endif
 
-EXPORT_SYMBOL(empty_zero_page);
-
 EXPORT_SYMBOL(mbc);
 
 extern void __divsi3(void);
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 844dce55569f..d389aa9ca57c 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -65,13 +65,6 @@ struct mm_struct;
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
-
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
 
diff --git a/arch/nios2/kernel/head.S b/arch/nios2/kernel/head.S
index 372ce4a33018..613212e1a63a 100644
--- a/arch/nios2/kernel/head.S
+++ b/arch/nios2/kernel/head.S
@@ -23,16 +23,6 @@
 #include <asm/asm-offsets.h>
 #include <asm/asm-macros.h>
 
-/*
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-.data
-.global empty_zero_page
-.align 12
-empty_zero_page:
-	.space	PAGE_SIZE
-
 /*
  * This global variable is used as an extension to the nios'
  * STATUS register to emulate a user/supervisor mode.
diff --git a/arch/nios2/kernel/nios2_ksyms.c b/arch/nios2/kernel/nios2_ksyms.c
index 54f7b23df1bf..c40aa39e8658 100644
--- a/arch/nios2/kernel/nios2_ksyms.c
+++ b/arch/nios2/kernel/nios2_ksyms.c
@@ -20,7 +20,6 @@ EXPORT_SYMBOL(memmove);
 
 /* memory management */
 
-EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(flush_icache_range);
 
 /*
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index b218050e2f6d..6b89996d0b62 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -179,10 +179,6 @@ extern void paging_init(void);
 	__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
 		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI)
 
-/* zero page used for uninitialized stuff */
-extern unsigned long empty_zero_page[2048];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
 #define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index bd760066f1cd..45890393947d 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -1563,9 +1563,6 @@ _string_nl:
  */
 	.section .data,"aw"
 	.align	8192
-	.global  empty_zero_page
-empty_zero_page:
-	.space  8192
 
 	.global  swapper_pg_dir
 swapper_pg_dir:
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c
index 212e5f85004c..84a937a64e2a 100644
--- a/arch/openrisc/kernel/or32_ksyms.c
+++ b/arch/openrisc/kernel/or32_ksyms.c
@@ -40,7 +40,6 @@ DECLARE_EXPORT(__ashldi3);
 DECLARE_EXPORT(__lshrdi3);
 DECLARE_EXPORT(__ucmpdi2);
 
-EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(__copy_tofrom_user);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(memset);
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 78fb0734cdbc..89d8c6df8855 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -188,9 +188,6 @@ void __init mem_init(void)
 {
 	BUG_ON(!mem_map);
 
-	/* clear the zero-page */
-	memset((void *)empty_zero_page, 0, PAGE_SIZE);
-
 	printk("mem_init_done ...........................................\n");
 	mem_init_done = 1;
 	return;
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 2c139a4dbf4b..cbdc01a26ea0 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -262,17 +262,6 @@ extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */
 
 extern pte_t pg0[];
 
-/* zero page used for uninitialized stuff */
-
-extern unsigned long *empty_zero_page;
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 #define pte_none(x)     (pte_val(x) == 0)
 #define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
 #define pte_user(x)	(pte_val(x) & _PAGE_USER)
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 6a39e031e5ff..be3380c9bcda 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -604,9 +604,6 @@ void __init mem_init(void)
 #endif
 }
 
-unsigned long *empty_zero_page __ro_after_init;
-EXPORT_SYMBOL(empty_zero_page);
-
 /*
  * pagetable_init() sets up the page tables
  *
@@ -639,9 +636,6 @@ static void __init pagetable_init(void)
 			  initrd_end - initrd_start, PAGE_KERNEL, 0);
 	}
 #endif
-
-	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
-
 }
 
 static void __init gateway_init(void)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index dcd3a88caaf6..b27d94c06d0e 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -76,12 +76,6 @@ static inline const void *pmd_page_vaddr(pmd_t pmd)
 }
 #define pmd_page_vaddr pmd_page_vaddr
 #endif
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 extern pgd_t swapper_pg_dir[];
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 3789a51bdaae..85508392a6b6 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -34,9 +34,6 @@
 
 unsigned long long memory_limit __initdata;
 
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
-
 pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
 				pgprot_t vma_prot)
 {
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 9ecbf0366719..a6b496f4944f 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -1258,13 +1258,6 @@ extern u64 satp_mode;
 void paging_init(void);
 void misc_mem_init(void);
 
-/*
- * ZERO_PAGE is a global shared page that is always zero,
- * used for zero-mapped memory areas, etc.
- */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 /*
  * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
  * TLB flush will be required as a result of the "set". For example, use
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 848efeb9e163..95ac79c62067 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -69,10 +69,6 @@ unsigned long vmemmap_start_pfn __ro_after_init;
 EXPORT_SYMBOL(vmemmap_start_pfn);
 #endif
 
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
-							__page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
-
 extern char _start[];
 void *_dtb_early_va __initdata;
 uintptr_t _dtb_early_pa __initdata;
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 10fa8f2bb8d1..d5ce0950a323 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -20,14 +20,6 @@
 #ifndef __ASSEMBLER__
 #include <asm/addrspace.h>
 #include <asm/fixmap.h>
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 #endif /* !__ASSEMBLER__ */
 
 /*
diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h
index 84bb23a771f3..63c9efc06348 100644
--- a/arch/sh/include/asm/setup.h
+++ b/arch/sh/include/asm/setup.h
@@ -7,7 +7,8 @@
 /*
  * This is set up by the setup-routine at boot-time
  */
-#define PARAM	((unsigned char *)empty_zero_page)
+extern unsigned char *boot_params_page;
+#define PARAM boot_params_page
 
 #define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
 #define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004))
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index b603b7968b38..0b91bb85d40a 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -26,7 +26,7 @@
 #endif
 
 	.section	.empty_zero_page, "aw"
-ENTRY(empty_zero_page)
+ENTRY(boot_params_page)
 	.long	1		/* MOUNT_ROOT_RDONLY */
 	.long	0		/* RAMDISK_FLAGS */
 	.long	0x0200		/* ORIG_ROOT_DEV */
@@ -39,7 +39,7 @@ ENTRY(empty_zero_page)
 	.long	0x53453f00 + 29	/* "SE?" = 29 bit */
 #endif
 1:
-	.skip	PAGE_SIZE - empty_zero_page - 1b
+	.skip	PAGE_SIZE - boot_params_page - 1b
 
 	__HEAD
 
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 5858936cb431..041191002e2e 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -20,7 +20,6 @@ EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(empty_zero_page);
 #ifdef CONFIG_FLATMEM
 /* need in pfn_valid macro */
 EXPORT_SYMBOL(min_low_pfn);
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 464a3a63e2fa..4e40d5e96be9 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -332,7 +332,6 @@ void __init mem_init(void)
 	cpu_cache_init();
 
 	/* clear the zero-page */
-	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
 	vsyscall_init();
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index a9f802d1dd64..f89b1250661d 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -71,14 +71,6 @@ extern unsigned long ptr_in_current_pgd;
 extern unsigned long phys_base;
 extern unsigned long pfn_base;
 
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 /*
  * In general all page table modifications should use the V8 atomic
  * swap instruction.  This insures the mmu and the cpu are in sync
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 72205684e51e..21bed5514028 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -17,8 +17,6 @@ extern char reboot_command[];
  */
 extern unsigned char boot_cpu_id;
 
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-
 extern int serial_console;
 static inline int con_is_present(void)
 {
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index 38345460d542..8c320fa25a67 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -57,13 +57,6 @@ sun4e_notsup:
 
 	.align PAGE_SIZE
 
-/* This was the only reasonable way I could think of to properly align
- * these page-table data structures.
- */
-	.globl empty_zero_page
-empty_zero_page:	.skip PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)
-
 	.global root_flags
 	.global ram_flags
 	.global root_dev
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index fdc93dd12c3e..e0e66f91ceeb 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -246,10 +246,6 @@ void __init arch_mm_preinit(void)
 		prom_halt();
 	}
 
-
-	/* Saves us work later. */
-	memset((void *)empty_zero_page, 0, PAGE_SIZE);
-
 	i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
 	i += 1;
 	sparc_valid_addr_bitmap = (unsigned long *)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 4f7bdb18774b..0cc8de2fea90 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2498,6 +2498,9 @@ static void __init register_page_bootmem_info(void)
 }
 void __init mem_init(void)
 {
+	phys_addr_t zero_page_pa = kern_base +
+		((unsigned long)&empty_zero_page[0] - KERNBASE);
+
 	/*
 	 * Must be done after boot memory is put on freelist, because here we
 	 * might set fields in deferred struct pages that have not yet been
@@ -2510,13 +2513,7 @@ void __init mem_init(void)
 	 * Set up the zero page, mark it reserved, so that page count
 	 * is not manipulated when freeing the page from user ptes.
 	 */
-	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
-	if (mem_map_zero == NULL) {
-		prom_printf("paging_init: Cannot alloc zero page.\n");
-		prom_halt();
-	}
-	mark_page_reserved(mem_map_zero);
-
+	mem_map_zero = pfn_to_page(PHYS_PFN(zero_page_pa));
 
 	if (tlb_type == cheetah || tlb_type == cheetah_plus)
 		cheetah_ecache_flush_init();
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 3b42b0f45bf6..19e0608fb649 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -34,9 +34,6 @@
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-/* zero page used for uninitialized stuff */
-extern unsigned long *empty_zero_page;
-
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts.  That means that
@@ -74,12 +71,6 @@ extern unsigned long *empty_zero_page;
  * get..
  */
 
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
-
 #define pte_clear(mm, addr, xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEEDSYNC))
 
 #define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEEDSYNC))
diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index 38321188c04c..9812efd14ec0 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -38,7 +38,6 @@ extern void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs
 extern void uml_pm_wake(void);
 
 extern int start_uml(void);
-extern void paging_init(void);
 
 extern void uml_cleanup(void);
 extern void do_uml_exitcalls(void);
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 89c8c8b94a79..1eef0e42ef5d 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -44,10 +44,6 @@ __section(".kasan_init") __used
 = kasan_init;
 #endif
 
-/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
-unsigned long *empty_zero_page = NULL;
-EXPORT_SYMBOL(empty_zero_page);
-
 /*
  * Initialized during boot, and readonly for initializing page tables
  * afterwards
@@ -65,9 +61,6 @@ void __init arch_mm_preinit(void)
 	/* Safe to call after jump_label_init(). Enables KASAN. */
 	kasan_init_generic();
 
-	/* clear the zero-page */
-	memset(empty_zero_page, 0, PAGE_SIZE);
-
 	/* Map in the area just after the brk now that kmalloc is about
 	 * to be turned on.
 	 */
@@ -89,15 +82,6 @@ void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
 	max_zone_pfns[ZONE_NORMAL] = high_physmem >> PAGE_SHIFT;
 }
 
-void __init paging_init(void)
-{
-	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
-							       PAGE_SIZE);
-	if (!empty_zero_page)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
-}
-
 /*
  * This can't do anything because nothing in the kernel image can be freed
  * since it's not in kernel physical memory.
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index e2b24e1ecfa6..2141f5f1f5a2 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -413,7 +413,6 @@ void __init setup_arch(char **cmdline_p)
 	uml_dtb_init();
 	read_initrd();
 
-	paging_init();
 	strscpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
 	*cmdline_p = command_line;
 	setup_hostinfo(host_info, sizeof host_info);
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1662c5a8f445..54289f4587a4 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -47,14 +47,6 @@ void ptdump_walk_user_pgd_level_checkwx(void);
 #define debug_checkwx_user()	do { } while (0)
 #endif
 
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
-	__visible;
-#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
-
 extern spinlock_t pgd_lock;
 extern struct list_head pgd_list;
 
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 80ef5d386b03..5171cb746444 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -441,10 +441,6 @@ initial_pg_fixmap:
 swapper_pg_dir:
 	.fill 1024,4,0
 	.fill PTI_USER_PGD_FILL,4,0
-.globl empty_zero_page
-empty_zero_page:
-	.fill 4096,1,0
-EXPORT_SYMBOL(empty_zero_page)
 
 /*
  * This starts the data section.
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 21816b48537c..cbf7647a25d8 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -712,10 +712,3 @@ SYM_PIC_ALIAS(phys_base);
 EXPORT_SYMBOL(phys_base)
 
 #include "../xen/xen-head.S"
-
-	__PAGE_ALIGNED_BSS
-SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
-	.skip PAGE_SIZE
-SYM_DATA_END(empty_zero_page)
-EXPORT_SYMBOL(empty_zero_page)
-
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 50a136213b2b..61f07d981a94 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -209,10 +209,6 @@
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
-extern unsigned long empty_zero_page[1024];
-
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 #ifdef CONFIG_MMU
 extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
 extern void paging_init(void);
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 8484294bc623..4b0c5c5e685a 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -381,6 +381,3 @@ ENTRY(swapper_pg_dir)
 	.fill	PAGE_SIZE, 1, 0
 END(swapper_pg_dir)
 #endif
-ENTRY(empty_zero_page)
-	.fill	PAGE_SIZE, 1, 0
-END(empty_zero_page)
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 62d81e76e18e..ced335b4df5f 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -15,8 +15,6 @@
 #include <linux/module.h>
 #include <asm/pgtable.h>
 
-EXPORT_SYMBOL(empty_zero_page);
-
 unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
 {
 	BUG();
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 9bacf4df9769..3d48eea57cd2 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1890,6 +1890,9 @@ static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
  * for different ranges in the virtual address space.
  *
  * zero_page_pfn identifies the first (or the only) pfn for these pages.
+ *
+ * For architectures that don't __HAVE_COLOR_ZERO_PAGE the zero page lives in
+ * empty_zero_page in BSS.
  */
 #ifdef __HAVE_COLOR_ZERO_PAGE
 static inline int is_zero_pfn(unsigned long pfn)
@@ -1916,6 +1919,13 @@ static inline unsigned long zero_pfn(unsigned long addr)
 
 	return zero_page_pfn;
 }
+
+extern uint8_t empty_zero_page[PAGE_SIZE];
+
+#ifndef ZERO_PAGE
+#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
+#endif
+
 #endif /* __HAVE_COLOR_ZERO_PAGE */
 
 #ifdef CONFIG_MMU
diff --git a/mm/mm_init.c b/mm/mm_init.c
index a0ca236eb4f5..1eac634ece1a 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -56,6 +56,11 @@ EXPORT_SYMBOL(high_memory);
 unsigned long zero_page_pfn __ro_after_init;
 EXPORT_SYMBOL(zero_page_pfn);
 
+#ifndef __HAVE_COLOR_ZERO_PAGE
+uint8_t empty_zero_page[PAGE_SIZE] __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+#endif
+
 #ifdef CONFIG_DEBUG_MEMORY_INIT
 int __meminitdata mminit_loglevel;
 
-- 
2.51.0



^ permalink raw reply	[flat|nested] 17+ messages in thread

* [PATCH v3 4/4] mm: cache struct page for empty_zero_page and return it from ZERO_PAGE()
  2026-02-11 10:31 [PATCH v3 0/4] arch, mm: consolidate empty_zero_page Mike Rapoport
                   ` (2 preceding siblings ...)
  2026-02-11 10:31 ` [PATCH v3 3/4] arch, mm: consolidate empty_zero_page Mike Rapoport
@ 2026-02-11 10:31 ` Mike Rapoport
  2026-02-12  9:08   ` David Hildenbrand (Arm)
  2026-02-12 18:40   ` Liam R. Howlett
  3 siblings, 2 replies; 17+ messages in thread
From: Mike Rapoport @ 2026-02-11 10:31 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Andreas Larsson, Borislav Petkov, Brian Cain, Catalin Marinas,
	Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, David Hildenbrand, Dinh Nguyen,
	Geert Uytterhoeven, Guo Ren, Helge Deller, Huacai Chen,
	Ingo Molnar, Johannes Berg, John Paul Adrian Glaubitz,
	Liam R. Howlett, Lorenzo Stoakes, Madhavan Srinivasan,
	Magnus Lindholm, Matt Turner, Max Filippov, Michael Ellerman,
	Michal Hocko, Michal Simek, Mike Rapoport, Palmer Dabbelt,
	Richard Weinberger, Russell King, Stafford Horne,
	Suren Baghdasaryan, Thomas Gleixner, Vineet Gupta,
	Vlastimil Babka, Will Deacon, linux-alpha, linux-kernel,
	linux-snps-arc, linux-arm-kernel, linux-csky, linux-hexagon,
	loongarch, linux-m68k, linux-openrisc, linux-parisc,
	linuxppc-dev, linux-riscv, linux-sh, sparclinux, linux-um,
	linux-mm, x86

From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>

For most architectures, every invocation of ZERO_PAGE() calls
virt_to_page(empty_zero_page). But empty_zero_page resides in BSS, so it
is enough to look up its struct page once at initialization time and then
reuse that cached pointer whenever the zero page is accessed.

Add yet another __zero_page variable that will be initialized as
virt_to_page(empty_zero_page) for most architectures in a weak
arch_setup_zero_pages() function.

For architectures that use colored zero pages (MIPS and s390), rename their
setup_zero_pages() to arch_setup_zero_pages() and make it global rather
than static.

For architectures that cannot use virt_to_page() for BSS (arm64 and
sparc64), add an override of arch_setup_zero_pages().

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
---
 arch/arm64/include/asm/pgtable.h    |  6 ------
 arch/arm64/mm/init.c                |  5 +++++
 arch/mips/mm/init.c                 | 11 +----------
 arch/s390/mm/init.c                 |  4 +---
 arch/sparc/include/asm/pgtable_64.h |  3 ---
 arch/sparc/mm/init_64.c             | 17 +++++++----------
 include/linux/pgtable.h             | 11 ++++++++---
 mm/mm_init.c                        | 21 +++++++++++++++++----
 8 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 63da07398a30..2c1ec7cc8612 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -106,12 +106,6 @@ static inline void arch_leave_lazy_mmu_mode(void)
 #define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp)	\
 	local_flush_tlb_page_nonotify(vma, address)
 
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))
-
 #define pte_ERROR(e)	\
 	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 96711b8578fd..417ec7efe569 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -328,6 +328,11 @@ void __init bootmem_init(void)
 	memblock_dump_all();
 }
 
+void __init arch_setup_zero_pages(void)
+{
+	__zero_page = phys_to_page(__pa_symbol(empty_zero_page));
+}
+
 void __init arch_mm_preinit(void)
 {
 	unsigned int flags = SWIOTLB_VERBOSE;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 4f6449ad02ca..55b25e85122a 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -56,10 +56,7 @@ unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL_GPL(empty_zero_page);
 EXPORT_SYMBOL(zero_page_mask);
 
-/*
- * Not static inline because used by IP27 special magic initialization code
- */
-static void __init setup_zero_pages(void)
+void __init arch_setup_zero_pages(void)
 {
 	unsigned int order;
 
@@ -450,7 +447,6 @@ void __init arch_mm_preinit(void)
 	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
 
 	maar_init();
-	setup_zero_pages();	/* Setup zeroed pages.  */
 	highmem_init();
 
 #ifdef CONFIG_64BIT
@@ -461,11 +457,6 @@ void __init arch_mm_preinit(void)
 				0x80000000 - 4, KCORE_TEXT);
 #endif
 }
-#else  /* CONFIG_NUMA */
-void __init arch_mm_preinit(void)
-{
-	setup_zero_pages();	/* This comes from node 0 */
-}
 #endif /* !CONFIG_NUMA */
 
 void free_init_pages(const char *what, unsigned long begin, unsigned long end)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3c20475cbee2..1f72efc2a579 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -69,7 +69,7 @@ unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(zero_page_mask);
 
-static void __init setup_zero_pages(void)
+void __init arch_setup_zero_pages(void)
 {
 	unsigned long total_pages = memblock_estimated_nr_free_pages();
 	unsigned int order;
@@ -159,8 +159,6 @@ void __init arch_mm_preinit(void)
 	cpumask_set_cpu(0, mm_cpumask(&init_mm));
 
 	pv_init();
-
-	setup_zero_pages();	/* Setup zeroed pages. */
 }
 
 unsigned long memory_block_size_bytes(void)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 615f460c50af..74ede706fb32 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -210,9 +210,6 @@ extern unsigned long _PAGE_CACHE;
 extern unsigned long pg_iobits;
 extern unsigned long _PAGE_ALL_SZ_BITS;
 
-extern struct page *mem_map_zero;
-#define ZERO_PAGE(vaddr)	(mem_map_zero)
-
 /* PFNs are real physical page numbers.  However, mem_map only begins to record
  * per-page information starting at pfn_base.  This is to handle systems where
  * the first physical page in the machine is at some huge physical address,
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 0cc8de2fea90..707c1df67d79 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -177,9 +177,6 @@ extern unsigned long sparc_ramdisk_image64;
 extern unsigned int sparc_ramdisk_image;
 extern unsigned int sparc_ramdisk_size;
 
-struct page *mem_map_zero __read_mostly;
-EXPORT_SYMBOL(mem_map_zero);
-
 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
 
 unsigned long sparc64_kern_pri_context __read_mostly;
@@ -2496,11 +2493,17 @@ static void __init register_page_bootmem_info(void)
 			register_page_bootmem_info_node(NODE_DATA(i));
 #endif
 }
-void __init mem_init(void)
+
+void __init arch_setup_zero_pages(void)
 {
 	phys_addr_t zero_page_pa = kern_base +
 		((unsigned long)&empty_zero_page[0] - KERNBASE);
 
+	__zero_page = phys_to_page(zero_page_pa);
+}
+
+void __init mem_init(void)
+{
 	/*
 	 * Must be done after boot memory is put on freelist, because here we
 	 * might set fields in deferred struct pages that have not yet been
@@ -2509,12 +2512,6 @@ void __init mem_init(void)
 	 */
 	register_page_bootmem_info();
 
-	/*
-	 * Set up the zero page, mark it reserved, so that page count
-	 * is not manipulated when freeing the page from user ptes.
-	 */
-	mem_map_zero = pfn_to_page(PHYS_PFN(zero_page_pa));
-
 	if (tlb_type == cheetah || tlb_type == cheetah_plus)
 		cheetah_ecache_flush_init();
 }
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 3d48eea57cd2..1da21ec62836 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1894,6 +1894,8 @@ static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
  * For architectures that don't __HAVE_COLOR_ZERO_PAGE the zero page lives in
  * empty_zero_page in BSS.
  */
+void arch_setup_zero_pages(void);
+
 #ifdef __HAVE_COLOR_ZERO_PAGE
 static inline int is_zero_pfn(unsigned long pfn)
 {
@@ -1921,10 +1923,13 @@ static inline unsigned long zero_pfn(unsigned long addr)
 }
 
 extern uint8_t empty_zero_page[PAGE_SIZE];
+extern struct page *__zero_page;
 
-#ifndef ZERO_PAGE
-#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
-#endif
+static inline struct page *_zero_page(unsigned long addr)
+{
+	return __zero_page;
+}
+#define ZERO_PAGE(vaddr) _zero_page(vaddr)
 
 #endif /* __HAVE_COLOR_ZERO_PAGE */
 
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 1eac634ece1a..b08608c1b71d 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -59,7 +59,10 @@ EXPORT_SYMBOL(zero_page_pfn);
 #ifndef __HAVE_COLOR_ZERO_PAGE
 uint8_t empty_zero_page[PAGE_SIZE] __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
-#endif
+
+struct page *__zero_page __ro_after_init;
+EXPORT_SYMBOL(__zero_page);
+#endif /* __HAVE_COLOR_ZERO_PAGE */
 
 #ifdef CONFIG_DEBUG_MEMORY_INIT
 int __meminitdata mminit_loglevel;
@@ -2675,12 +2678,21 @@ static void __init mem_init_print_info(void)
 		);
 }
 
-static int __init init_zero_page_pfn(void)
+#ifndef __HAVE_COLOR_ZERO_PAGE
+/*
+ * architectures that __HAVE_COLOR_ZERO_PAGE must define this function
+ */
+void __init __weak arch_setup_zero_pages(void)
+{
+	__zero_page = virt_to_page(empty_zero_page);
+}
+#endif
+
+static void __init init_zero_page_pfn(void)
 {
+	arch_setup_zero_pages();
 	zero_page_pfn = page_to_pfn(ZERO_PAGE(0));
-	return 0;
 }
-early_initcall(init_zero_page_pfn);
 
 void __init __weak arch_mm_preinit(void)
 {
@@ -2704,6 +2716,7 @@ void __init mm_core_init_early(void)
 void __init mm_core_init(void)
 {
 	arch_mm_preinit();
+	init_zero_page_pfn();
 
 	/* Initializations relying on SMP setup */
 	BUILD_BUG_ON(MAX_ZONELISTS > 2);
-- 
2.51.0



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v3 3/4] arch, mm: consolidate empty_zero_page
  2026-02-11 10:31 ` [PATCH v3 3/4] arch, mm: consolidate empty_zero_page Mike Rapoport
@ 2026-02-11 20:14   ` Magnus Lindholm
  2026-02-12  5:33   ` Dinh Nguyen
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 17+ messages in thread
From: Magnus Lindholm @ 2026-02-11 20:14 UTC (permalink / raw)
  To: Mike Rapoport
  Cc: Andrew Morton, Andreas Larsson, Borislav Petkov, Brian Cain,
	Catalin Marinas, Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, David Hildenbrand, Dinh Nguyen,
	Geert Uytterhoeven, Guo Ren, Helge Deller, Huacai Chen,
	Ingo Molnar, Johannes Berg, John Paul Adrian Glaubitz,
	Liam R. Howlett, Lorenzo Stoakes, Madhavan Srinivasan,
	Matt Turner, Max Filippov, Michael Ellerman, Michal Hocko,
	Michal Simek, Palmer Dabbelt, Richard Weinberger, Russell King,
	Stafford Horne, Suren Baghdasaryan, Thomas Gleixner,
	Vineet Gupta, Vlastimil Babka, Will Deacon, linux-alpha,
	linux-kernel, linux-snps-arc, linux-arm-kernel, linux-csky,
	linux-hexagon, loongarch, linux-m68k, linux-openrisc,
	linux-parisc, linuxppc-dev, linux-riscv, linux-sh, sparclinux,
	linux-um, linux-mm, x86

On Wed, Feb 11, 2026 at 11:32 AM Mike Rapoport <rppt@kernel.org> wrote:
>
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
>
> Reduce 22 declarations of empty_zero_page to 3 and 23 declarations of
> ZERO_PAGE() to 4.
>
> Every architecture defines empty_zero_page one way or another, but for the
> most of them it is always a page aligned page in BSS and most definitions
> of ZERO_PAGE do virt_to_page(empty_zero_page).
>
> Move Linus vetted x86 definition of empty_zero_page and ZERO_PAGE() to the
> core MM and drop these definitions in architectures that do not implement
> colored zero page (MIPS and s390).
>
> ZERO_PAGE() remains a macro because turning it to a wrapper for a static
> inline causes severe pain in header dependencies.
>
> For the most part the change is mechanical, with these being noteworthy:
>
> * alpha: aliased empty_zero_page with ZERO_PGE that was also used for boot
>   parameters. Switching to a generic empty_zero_page removes the aliasing
>   and keeps ZERO_PGE for boot parameters only
> * arm64: uses __pa_symbol() in ZERO_PAGE() so that definition of
>   ZERO_PAGE() is kept intact.
> * m68k/parisc/um: allocated empty_zero_page from memblock,
>   although they do not support zero page coloring and having it in BSS
>   will work fine.
> * sparc64 can have empty_zero_page in BSS rather than allocate it, but it
>   can't use virt_to_page() for BSS. Keep its definition of ZERO_PAGE()
>   but instead of allocating it, make mem_map_zero point to
>   empty_zero_page.
> * sh: used empty_zero_page for boot parameters at the very early boot.
>   Rename the parameters page to boot_params_page and let sh use the generic
>   empty_zero_page.
> * hexagon: had an amusing comment about empty_zero_page
>
>         /* A handy thing to have if one has the RAM. Declared in head.S */
>
>   that unfortunately had to go :)
>
> Acked-by: Helge Deller <deller@gmx.de>   # parisc
> Tested-by: Helge Deller <deller@gmx.de>  # parisc
> Reviewed-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
> Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> ---
>  arch/alpha/include/asm/pgtable.h          |  6 ------

> diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
> index 90e7a9539102..12a3c5f8ece8 100644
> --- a/arch/alpha/include/asm/pgtable.h
> +++ b/arch/alpha/include/asm/pgtable.h
> @@ -125,12 +125,6 @@ struct vm_area_struct;
>   */
>  #define pgprot_noncached(prot) (prot)
>
> -/*
> - * ZERO_PAGE is a global shared page that is always zero:  used
> - * for zero-mapped memory areas etc..
> - */
> -#define ZERO_PAGE(vaddr)       (virt_to_page(ZERO_PGE))
> -
>  /*
>   * On certain platforms whose physical address space can overlap KSEG,
>   * namely EV6 and above, we must re-twiddle the physaddr to restore the

This looks good from an Alpha perspective

Acked-by: Magnus Lindholm <linmag7@gmail.com>


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v3 3/4] arch, mm: consolidate empty_zero_page
  2026-02-11 10:31 ` [PATCH v3 3/4] arch, mm: consolidate empty_zero_page Mike Rapoport
  2026-02-11 20:14   ` Magnus Lindholm
@ 2026-02-12  5:33   ` Dinh Nguyen
  2026-02-12  8:38   ` Andreas Larsson
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 17+ messages in thread
From: Dinh Nguyen @ 2026-02-12  5:33 UTC (permalink / raw)
  To: Mike Rapoport, Andrew Morton
  Cc: Andreas Larsson, Borislav Petkov, Brian Cain, Catalin Marinas,
	Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, David Hildenbrand,
	Geert Uytterhoeven, Guo Ren, Helge Deller, Huacai Chen,
	Ingo Molnar, Johannes Berg, John Paul Adrian Glaubitz,
	Liam R. Howlett, Lorenzo Stoakes, Madhavan Srinivasan,
	Magnus Lindholm, Matt Turner, Max Filippov, Michael Ellerman,
	Michal Hocko, Michal Simek, Palmer Dabbelt, Richard Weinberger,
	Russell King, Stafford Horne, Suren Baghdasaryan,
	Thomas Gleixner, Vineet Gupta, Vlastimil Babka, Will Deacon,
	linux-alpha, linux-kernel, linux-snps-arc, linux-arm-kernel,
	linux-csky, linux-hexagon, loongarch, linux-m68k, linux-openrisc,
	linux-parisc, linuxppc-dev, linux-riscv, linux-sh, sparclinux,
	linux-um, linux-mm, x86



On 2/11/26 04:31, Mike Rapoport wrote:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> 
> Reduce 22 declarations of empty_zero_page to 3 and 23 declarations of
> ZERO_PAGE() to 4.
> 
> Every architecture defines empty_zero_page one way or another, but for the
> most of them it is always a page aligned page in BSS and most definitions
> of ZERO_PAGE do virt_to_page(empty_zero_page).
> 
> Move Linus vetted x86 definition of empty_zero_page and ZERO_PAGE() to the
> core MM and drop these definitions in architectures that do not implement
> colored zero page (MIPS and s390).
> 
> ZERO_PAGE() remains a macro because turning it to a wrapper for a static
> inline causes severe pain in header dependencies.
> 
> For the most part the change is mechanical, with these being noteworthy:
> 
> * alpha: aliased empty_zero_page with ZERO_PGE that was also used for boot
>    parameters. Switching to a generic empty_zero_page removes the aliasing
>    and keeps ZERO_PGE for boot parameters only
> * arm64: uses __pa_symbol() in ZERO_PAGE() so that definition of
>    ZERO_PAGE() is kept intact.
> * m68k/parisc/um: allocated empty_zero_page from memblock,
>    although they do not support zero page coloring and having it in BSS
>    will work fine.
> * sparc64 can have empty_zero_page in BSS rather than allocate it, but it
>    can't use virt_to_page() for BSS. Keep its definition of ZERO_PAGE()
>    but instead of allocating it, make mem_map_zero point to
>    empty_zero_page.
> * sh: used empty_zero_page for boot parameters at the very early boot.
>    Rename the parameters page to boot_params_page and let sh use the generic
>    empty_zero_page.
> * hexagon: had an amusing comment about empty_zero_page
> 
> 	/* A handy thing to have if one has the RAM. Declared in head.S */
> 
>    that unfortunately had to go :)
> 
> Acked-by: Helge Deller <deller@gmx.de>   # parisc
> Tested-by: Helge Deller <deller@gmx.de>  # parisc
> Reviewed-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
> Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> ---

>   arch/nios2/include/asm/pgtable.h          |  7 -------
>   arch/nios2/kernel/head.S                  | 10 ----------
>   arch/nios2/kernel/nios2_ksyms.c           |  1 -

For nios2,

Acked-by: Dinh Nguyen <dinguyen@kernel.org>


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v3 3/4] arch, mm: consolidate empty_zero_page
  2026-02-11 10:31 ` [PATCH v3 3/4] arch, mm: consolidate empty_zero_page Mike Rapoport
  2026-02-11 20:14   ` Magnus Lindholm
  2026-02-12  5:33   ` Dinh Nguyen
@ 2026-02-12  8:38   ` Andreas Larsson
  2026-02-12  9:04   ` David Hildenbrand (Arm)
  2026-02-12 18:38   ` Liam R. Howlett
  4 siblings, 0 replies; 17+ messages in thread
From: Andreas Larsson @ 2026-02-12  8:38 UTC (permalink / raw)
  To: Mike Rapoport, Andrew Morton
  Cc: Borislav Petkov, Brian Cain, Catalin Marinas,
	Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, David Hildenbrand, Dinh Nguyen,
	Geert Uytterhoeven, Guo Ren, Helge Deller, Huacai Chen,
	Ingo Molnar, Johannes Berg, John Paul Adrian Glaubitz,
	Liam R. Howlett, Lorenzo Stoakes, Madhavan Srinivasan,
	Magnus Lindholm, Matt Turner, Max Filippov, Michael Ellerman,
	Michal Hocko, Michal Simek, Palmer Dabbelt, Richard Weinberger,
	Russell King, Stafford Horne, Suren Baghdasaryan,
	Thomas Gleixner, Vineet Gupta, Vlastimil Babka, Will Deacon,
	linux-alpha, linux-kernel, linux-snps-arc, linux-arm-kernel,
	linux-csky, linux-hexagon, loongarch, linux-m68k, linux-openrisc,
	linux-parisc, linuxppc-dev, linux-riscv, linux-sh, sparclinux,
	linux-um, linux-mm, x86

On 2026-02-11 11:31, Mike Rapoport wrote:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> 
> Reduce 22 declarations of empty_zero_page to 3 and 23 declarations of
> ZERO_PAGE() to 4.
> 
> Every architecture defines empty_zero_page one way or another, but for the
> most of them it is always a page aligned page in BSS and most definitions
> of ZERO_PAGE do virt_to_page(empty_zero_page).
> 
> Move Linus vetted x86 definition of empty_zero_page and ZERO_PAGE() to the
> core MM and drop these definitions in architectures that do not implement
> colored zero page (MIPS and s390).
> 
> ZERO_PAGE() remains a macro because turning it to a wrapper for a static
> inline causes severe pain in header dependencies.
> 
> For the most part the change is mechanical, with these being noteworthy:
> 
> * alpha: aliased empty_zero_page with ZERO_PGE that was also used for boot
>   parameters. Switching to a generic empty_zero_page removes the aliasing
>   and keeps ZERO_PGE for boot parameters only
> * arm64: uses __pa_symbol() in ZERO_PAGE() so that definition of
>   ZERO_PAGE() is kept intact.
> * m68k/parisc/um: allocated empty_zero_page from memblock,
>   although they do not support zero page coloring and having it in BSS
>   will work fine.
> * sparc64 can have empty_zero_page in BSS rather than allocate it, but it
>   can't use virt_to_page() for BSS. Keep its definition of ZERO_PAGE()
>   but instead of allocating it, make mem_map_zero point to
>   empty_zero_page.
> * sh: used empty_zero_page for boot parameters at the very early boot.
>   Rename the parameters page to boot_params_page and let sh use the generic
>   empty_zero_page.
> * hexagon: had an amusing comment about empty_zero_page
> 
> 	/* A handy thing to have if one has the RAM. Declared in head.S */
> 
>   that unfortunately had to go :)
> 
> Acked-by: Helge Deller <deller@gmx.de>   # parisc
> Tested-by: Helge Deller <deller@gmx.de>  # parisc
> Reviewed-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
> Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> ---

>  arch/sparc/include/asm/pgtable_32.h       |  8 --------
>  arch/sparc/include/asm/setup.h            |  2 --
>  arch/sparc/kernel/head_32.S               |  7 -------
>  arch/sparc/mm/init_32.c                   |  4 ----
>  arch/sparc/mm/init_64.c                   | 11 ++++-------
Acked-by: Andreas Larsson <andreas@gaisler.com> #sparc

Cheers,
Andreas



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v3 1/4] mm: don't special case !MMU for is_zero_pfn() and my_zero_pfn()
  2026-02-11 10:31 ` [PATCH v3 1/4] mm: don't special case !MMU for is_zero_pfn() and my_zero_pfn() Mike Rapoport
@ 2026-02-12  8:58   ` David Hildenbrand (Arm)
  2026-02-12 18:30   ` Liam R. Howlett
  1 sibling, 0 replies; 17+ messages in thread
From: David Hildenbrand (Arm) @ 2026-02-12  8:58 UTC (permalink / raw)
  To: Mike Rapoport, Andrew Morton
  Cc: Andreas Larsson, Borislav Petkov, Brian Cain, Catalin Marinas,
	Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, Dinh Nguyen, Geert Uytterhoeven,
	Guo Ren, Helge Deller, Huacai Chen, Ingo Molnar, Johannes Berg,
	John Paul Adrian Glaubitz, Liam R. Howlett, Lorenzo Stoakes,
	Madhavan Srinivasan, Magnus Lindholm, Matt Turner, Max Filippov,
	Michael Ellerman, Michal Hocko, Michal Simek, Palmer Dabbelt,
	Richard Weinberger, Russell King, Stafford Horne,
	Suren Baghdasaryan, Thomas Gleixner, Vineet Gupta,
	Vlastimil Babka, Will Deacon, linux-alpha, linux-kernel,
	linux-snps-arc, linux-arm-kernel, linux-csky, linux-hexagon,
	loongarch, linux-m68k, linux-openrisc, linux-parisc,
	linuxppc-dev, linux-riscv, linux-sh, sparclinux, linux-um,
	linux-mm, x86

On 2/11/26 11:31, Mike Rapoport wrote:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> 
> nommu architectures have empty_zero_page and define ZERO_PAGE() and
> although they don't really use it to populate page tables, there is no
> reason to hardwire !MMU implementation of is_zero_pfn() and my_zero_pfn()
> to 0.
> 
> Drop #ifdef CONFIG_MMU around implementations of is_zero_pfn() and
> my_zero_pfn() and remove !MMU version.
> 
> While on it, make zero_pfn __ro_after_init.
> 
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> ---

Acked-by: David Hildenbrand (Arm) <david@kernel.org>

-- 
Cheers,

David


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v3 2/4] mm: rename my_zero_pfn() to zero_pfn()
  2026-02-11 10:31 ` [PATCH v3 2/4] mm: rename my_zero_pfn() to zero_pfn() Mike Rapoport
@ 2026-02-12  9:01   ` David Hildenbrand (Arm)
  2026-02-12 15:28   ` Vlastimil Babka
  2026-02-12 18:33   ` Liam R. Howlett
  2 siblings, 0 replies; 17+ messages in thread
From: David Hildenbrand (Arm) @ 2026-02-12  9:01 UTC (permalink / raw)
  To: Mike Rapoport, Andrew Morton
  Cc: Andreas Larsson, Borislav Petkov, Brian Cain, Catalin Marinas,
	Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, Dinh Nguyen, Geert Uytterhoeven,
	Guo Ren, Helge Deller, Huacai Chen, Ingo Molnar, Johannes Berg,
	John Paul Adrian Glaubitz, Liam R. Howlett, Lorenzo Stoakes,
	Madhavan Srinivasan, Magnus Lindholm, Matt Turner, Max Filippov,
	Michael Ellerman, Michal Hocko, Michal Simek, Palmer Dabbelt,
	Richard Weinberger, Russell King, Stafford Horne,
	Suren Baghdasaryan, Thomas Gleixner, Vineet Gupta,
	Vlastimil Babka, Will Deacon, linux-alpha, linux-kernel,
	linux-snps-arc, linux-arm-kernel, linux-csky, linux-hexagon,
	loongarch, linux-m68k, linux-openrisc, linux-parisc,
	linuxppc-dev, linux-riscv, linux-sh, sparclinux, linux-um,
	linux-mm, x86

On 2/11/26 11:31, Mike Rapoport wrote:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> 
> my_zero_pfn() is a silly name.
> 
> Rename zero_pfn variable to zero_page_pfn and my_zero_pfn() function to
> zero_pfn().
> 
> While on it, move extern declarations of zero_page_pfn outside the
> functions that use it and add a comment about what ZERO_PAGE is.
> 
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> ---

Acked-by: David Hildenbrand (Arm) <david@kernel.org>

-- 
Cheers,

David


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v3 3/4] arch, mm: consolidate empty_zero_page
  2026-02-11 10:31 ` [PATCH v3 3/4] arch, mm: consolidate empty_zero_page Mike Rapoport
                     ` (2 preceding siblings ...)
  2026-02-12  8:38   ` Andreas Larsson
@ 2026-02-12  9:04   ` David Hildenbrand (Arm)
  2026-02-12 18:38   ` Liam R. Howlett
  4 siblings, 0 replies; 17+ messages in thread
From: David Hildenbrand (Arm) @ 2026-02-12  9:04 UTC (permalink / raw)
  To: Mike Rapoport, Andrew Morton
  Cc: Andreas Larsson, Borislav Petkov, Brian Cain, Catalin Marinas,
	Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, Dinh Nguyen, Geert Uytterhoeven,
	Guo Ren, Helge Deller, Huacai Chen, Ingo Molnar, Johannes Berg,
	John Paul Adrian Glaubitz, Liam R. Howlett, Lorenzo Stoakes,
	Madhavan Srinivasan, Magnus Lindholm, Matt Turner, Max Filippov,
	Michael Ellerman, Michal Hocko, Michal Simek, Palmer Dabbelt,
	Richard Weinberger, Russell King, Stafford Horne,
	Suren Baghdasaryan, Thomas Gleixner, Vineet Gupta,
	Vlastimil Babka, Will Deacon, linux-alpha, linux-kernel,
	linux-snps-arc, linux-arm-kernel, linux-csky, linux-hexagon,
	loongarch, linux-m68k, linux-openrisc, linux-parisc,
	linuxppc-dev, linux-riscv, linux-sh, sparclinux, linux-um,
	linux-mm, x86

On 2/11/26 11:31, Mike Rapoport wrote:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> 
> Reduce 22 declarations of empty_zero_page to 3 and 23 declarations of
> ZERO_PAGE() to 4.
> 
> Every architecture defines empty_zero_page one way or another, but for the
> most of them it is always a page aligned page in BSS and most definitions
> of ZERO_PAGE do virt_to_page(empty_zero_page).
> 
> Move Linus vetted x86 definition of empty_zero_page and ZERO_PAGE() to the
> core MM and drop these definitions in architectures that do not implement
> colored zero page (MIPS and s390).
> 
> ZERO_PAGE() remains a macro because turning it to a wrapper for a static
> inline causes severe pain in header dependencies.
> 
> For the most part the change is mechanical, with these being noteworthy:
> 
> * alpha: aliased empty_zero_page with ZERO_PGE that was also used for boot
>    parameters. Switching to a generic empty_zero_page removes the aliasing
>    and keeps ZERO_PGE for boot parameters only
> * arm64: uses __pa_symbol() in ZERO_PAGE() so that definition of
>    ZERO_PAGE() is kept intact.
> * m68k/parisc/um: allocated empty_zero_page from memblock,
>    although they do not support zero page coloring and having it in BSS
>    will work fine.
> * sparc64 can have empty_zero_page in BSS rather than allocate it, but it
>    can't use virt_to_page() for BSS. Keep its definition of ZERO_PAGE()
>    but instead of allocating it, make mem_map_zero point to
>    empty_zero_page.
> * sh: used empty_zero_page for boot parameters at the very early boot.
>    Rename the parameters page to boot_params_page and let sh use the generic
>    empty_zero_page.
> * hexagon: had an amusing comment about empty_zero_page
> 
> 	/* A handy thing to have if one has the RAM. Declared in head.S */
> 
>    that unfortunately had to go :)
> 
> Acked-by: Helge Deller <deller@gmx.de>   # parisc
> Tested-by: Helge Deller <deller@gmx.de>  # parisc
> Reviewed-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
> Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> ---

Acked-by: David Hildenbrand (Arm) <david@kernel.org>

-- 
Cheers,

David


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v3 4/4] mm: cache struct page for empty_zero_page and return it from ZERO_PAGE()
  2026-02-11 10:31 ` [PATCH v3 4/4] mm: cache struct page for empty_zero_page and return it from ZERO_PAGE() Mike Rapoport
@ 2026-02-12  9:08   ` David Hildenbrand (Arm)
  2026-02-12 18:40   ` Liam R. Howlett
  1 sibling, 0 replies; 17+ messages in thread
From: David Hildenbrand (Arm) @ 2026-02-12  9:08 UTC (permalink / raw)
  To: Mike Rapoport, Andrew Morton
  Cc: Andreas Larsson, Borislav Petkov, Brian Cain, Catalin Marinas,
	Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, Dinh Nguyen, Geert Uytterhoeven,
	Guo Ren, Helge Deller, Huacai Chen, Ingo Molnar, Johannes Berg,
	John Paul Adrian Glaubitz, Liam R. Howlett, Lorenzo Stoakes,
	Madhavan Srinivasan, Magnus Lindholm, Matt Turner, Max Filippov,
	Michael Ellerman, Michal Hocko, Michal Simek, Palmer Dabbelt,
	Richard Weinberger, Russell King, Stafford Horne,
	Suren Baghdasaryan, Thomas Gleixner, Vineet Gupta,
	Vlastimil Babka, Will Deacon, linux-alpha, linux-kernel,
	linux-snps-arc, linux-arm-kernel, linux-csky, linux-hexagon,
	loongarch, linux-m68k, linux-openrisc, linux-parisc,
	linuxppc-dev, linux-riscv, linux-sh, sparclinux, linux-um,
	linux-mm, x86

On 2/11/26 11:31, Mike Rapoport wrote:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> 
> For most architectures every invocation of ZERO_PAGE() does
> virt_to_page(empty_zero_page). But empty_zero_page is in BSS and it is
> enough to get its struct page once at initialization time and then use
> it whenever a zero page should be accessed.
> 
> Add yet another __zero_page variable that will be initialized as
> virt_to_page(empty_zero_page) for most architectures in a weak
> arch_setup_zero_pages() function.
> 
> For architectures that use colored zero pages (MIPS and s390) rename their
> setup_zero_pages() to arch_setup_zero_pages() and make it global rather
> than static.
> 
> For architectures that cannot use virt_to_page() for BSS (arm64 and
> sparc64) add override of arch_setup_zero_pages().
> 
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> ---

Acked-by: David Hildenbrand (Arm) <david@kernel.org>

-- 
Cheers,

David


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v3 2/4] mm: rename my_zero_pfn() to zero_pfn()
  2026-02-11 10:31 ` [PATCH v3 2/4] mm: rename my_zero_pfn() to zero_pfn() Mike Rapoport
  2026-02-12  9:01   ` David Hildenbrand (Arm)
@ 2026-02-12 15:28   ` Vlastimil Babka
  2026-02-12 18:33   ` Liam R. Howlett
  2 siblings, 0 replies; 17+ messages in thread
From: Vlastimil Babka @ 2026-02-12 15:28 UTC (permalink / raw)
  To: Mike Rapoport, Andrew Morton
  Cc: Andreas Larsson, Borislav Petkov, Brian Cain, Catalin Marinas,
	Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, David Hildenbrand, Dinh Nguyen,
	Geert Uytterhoeven, Guo Ren, Helge Deller, Huacai Chen,
	Ingo Molnar, Johannes Berg, John Paul Adrian Glaubitz,
	Liam R. Howlett, Lorenzo Stoakes, Madhavan Srinivasan,
	Magnus Lindholm, Matt Turner, Max Filippov, Michael Ellerman,
	Michal Hocko, Michal Simek, Palmer Dabbelt, Richard Weinberger,
	Russell King, Stafford Horne, Suren Baghdasaryan,
	Thomas Gleixner, Vineet Gupta, Will Deacon, linux-alpha,
	linux-kernel, linux-snps-arc, linux-arm-kernel, linux-csky,
	linux-hexagon, loongarch, linux-m68k, linux-openrisc,
	linux-parisc, linuxppc-dev, linux-riscv, linux-sh, sparclinux,
	linux-um, linux-mm, x86

On 2/11/26 11:31, Mike Rapoport wrote:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> 
> my_zero_pfn() is a silly name.
> 
> Rename zero_pfn variable to zero_page_pfn and my_zero_pfn() function to
> zero_pfn().
> 
> While on it, move extern declarations of zero_page_pfn outside the
> functions that use it and add a comment about what ZERO_PAGE is.
> 
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>

Acked-by: Vlastimil Babka <vbabka@suse.cz>



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v3 1/4] mm: don't special case !MMU for is_zero_pfn() and my_zero_pfn()
  2026-02-11 10:31 ` [PATCH v3 1/4] mm: don't special case !MMU for is_zero_pfn() and my_zero_pfn() Mike Rapoport
  2026-02-12  8:58   ` David Hildenbrand (Arm)
@ 2026-02-12 18:30   ` Liam R. Howlett
  1 sibling, 0 replies; 17+ messages in thread
From: Liam R. Howlett @ 2026-02-12 18:30 UTC (permalink / raw)
  To: Mike Rapoport
  Cc: Andrew Morton, Andreas Larsson, Borislav Petkov, Brian Cain,
	Catalin Marinas, Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, David Hildenbrand, Dinh Nguyen,
	Geert Uytterhoeven, Guo Ren, Helge Deller, Huacai Chen,
	Ingo Molnar, Johannes Berg, John Paul Adrian Glaubitz,
	Lorenzo Stoakes, Madhavan Srinivasan, Magnus Lindholm,
	Matt Turner, Max Filippov, Michael Ellerman, Michal Hocko,
	Michal Simek, Palmer Dabbelt, Richard Weinberger, Russell King,
	Stafford Horne, Suren Baghdasaryan, Thomas Gleixner,
	Vineet Gupta, Vlastimil Babka, Will Deacon, linux-alpha,
	linux-kernel, linux-snps-arc, linux-arm-kernel, linux-csky,
	linux-hexagon, loongarch, linux-m68k, linux-openrisc,
	linux-parisc, linuxppc-dev, linux-riscv, linux-sh, sparclinux,
	linux-um, linux-mm, x86

* Mike Rapoport <rppt@kernel.org> [260211 05:32]:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> 
> nommu architectures have empty_zero_page and define ZERO_PAGE() and
> although they don't really use it to populate page tables, there is no
> reason to hardwire !MMU implementation of is_zero_pfn() and my_zero_pfn()
> to 0.
> 
> Drop #ifdef CONFIG_MMU around implementations of is_zero_pfn() and
> my_zero_pfn() and remove !MMU version.
> 
> While on it, make zero_pfn __ro_after_init.
> 
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>

Acked-by: Liam R. Howlett <Liam.Howlett@oracle.com>

> ---
>  include/linux/pgtable.h | 14 +-------------
>  mm/memory.c             | 13 -------------
>  mm/mm_init.c            | 10 ++++++++++
>  3 files changed, 11 insertions(+), 26 deletions(-)
> 
> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> index 827dca25c0bc..08a88b0d56e5 100644
> --- a/include/linux/pgtable.h
> +++ b/include/linux/pgtable.h
> @@ -1882,7 +1882,6 @@ static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
>  	pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
>  }
>  
> -#ifdef CONFIG_MMU
>  #ifdef __HAVE_COLOR_ZERO_PAGE
>  static inline int is_zero_pfn(unsigned long pfn)
>  {
> @@ -1905,18 +1904,7 @@ static inline unsigned long my_zero_pfn(unsigned long addr)
>  	extern unsigned long zero_pfn;
>  	return zero_pfn;
>  }
> -#endif
> -#else
> -static inline int is_zero_pfn(unsigned long pfn)
> -{
> -	return 0;
> -}
> -
> -static inline unsigned long my_zero_pfn(unsigned long addr)
> -{
> -	return 0;
> -}
> -#endif /* CONFIG_MMU */
> +#endif /* __HAVE_COLOR_ZERO_PAGE */
>  
>  #ifdef CONFIG_MMU
>  
> diff --git a/mm/memory.c b/mm/memory.c
> index 187f16b7e996..51d2018a387a 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -162,21 +162,8 @@ static int __init disable_randmaps(char *s)
>  }
>  __setup("norandmaps", disable_randmaps);
>  
> -unsigned long zero_pfn __read_mostly;
> -EXPORT_SYMBOL(zero_pfn);
> -
>  unsigned long highest_memmap_pfn __read_mostly;
>  
> -/*
> - * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
> - */
> -static int __init init_zero_pfn(void)
> -{
> -	zero_pfn = page_to_pfn(ZERO_PAGE(0));
> -	return 0;
> -}
> -early_initcall(init_zero_pfn);
> -
>  void mm_trace_rss_stat(struct mm_struct *mm, int member)
>  {
>  	trace_rss_stat(mm, member);
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index 1a29a719af58..dcf9eff34f83 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -53,6 +53,9 @@ EXPORT_SYMBOL(mem_map);
>  void *high_memory;
>  EXPORT_SYMBOL(high_memory);
>  
> +unsigned long zero_pfn __ro_after_init;
> +EXPORT_SYMBOL(zero_pfn);
> +
>  #ifdef CONFIG_DEBUG_MEMORY_INIT
>  int __meminitdata mminit_loglevel;
>  
> @@ -2667,6 +2670,13 @@ static void __init mem_init_print_info(void)
>  		);
>  }
>  
> +static int __init init_zero_pfn(void)
> +{
> +	zero_pfn = page_to_pfn(ZERO_PAGE(0));
> +	return 0;
> +}
> +early_initcall(init_zero_pfn);
> +
>  void __init __weak arch_mm_preinit(void)
>  {
>  }
> -- 
> 2.51.0
> 


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v3 2/4] mm: rename my_zero_pfn() to zero_pfn()
  2026-02-11 10:31 ` [PATCH v3 2/4] mm: rename my_zero_pfn() to zero_pfn() Mike Rapoport
  2026-02-12  9:01   ` David Hildenbrand (Arm)
  2026-02-12 15:28   ` Vlastimil Babka
@ 2026-02-12 18:33   ` Liam R. Howlett
  2 siblings, 0 replies; 17+ messages in thread
From: Liam R. Howlett @ 2026-02-12 18:33 UTC (permalink / raw)
  To: Mike Rapoport
  Cc: Andrew Morton, Andreas Larsson, Borislav Petkov, Brian Cain,
	Catalin Marinas, Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, David Hildenbrand, Dinh Nguyen,
	Geert Uytterhoeven, Guo Ren, Helge Deller, Huacai Chen,
	Ingo Molnar, Johannes Berg, John Paul Adrian Glaubitz,
	Lorenzo Stoakes, Madhavan Srinivasan, Magnus Lindholm,
	Matt Turner, Max Filippov, Michael Ellerman, Michal Hocko,
	Michal Simek, Palmer Dabbelt, Richard Weinberger, Russell King,
	Stafford Horne, Suren Baghdasaryan, Thomas Gleixner,
	Vineet Gupta, Vlastimil Babka, Will Deacon, linux-alpha,
	linux-kernel, linux-snps-arc, linux-arm-kernel, linux-csky,
	linux-hexagon, loongarch, linux-m68k, linux-openrisc,
	linux-parisc, linuxppc-dev, linux-riscv, linux-sh, sparclinux,
	linux-um, linux-mm, x86

* Mike Rapoport <rppt@kernel.org> [260211 05:32]:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> 
> my_zero_pfn() is a silly name.
> 
> Rename zero_pfn variable to zero_page_pfn and my_zero_pfn() function to
> zero_pfn().
> 
> While on it, move extern declarations of zero_page_pfn outside the
> functions that use it and add a comment about what ZERO_PAGE is.
> 
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>

Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>

> ---
>  arch/x86/kvm/mmu/spte.h |  2 +-
>  fs/dax.c                |  2 +-
>  fs/proc/vmcore.c        |  2 +-
>  include/linux/pgtable.h | 28 ++++++++++++++++++++--------
>  mm/huge_memory.c        |  2 +-
>  mm/memory.c             |  2 +-
>  mm/migrate.c            |  2 +-
>  mm/mm_init.c            | 10 +++++-----
>  mm/userfaultfd.c        |  4 ++--
>  9 files changed, 33 insertions(+), 21 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
> index 91ce29fd6f1b..8c0ffa2cded6 100644
> --- a/arch/x86/kvm/mmu/spte.h
> +++ b/arch/x86/kvm/mmu/spte.h
> @@ -248,7 +248,7 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
>  
>  static inline hpa_t kvm_mmu_get_dummy_root(void)
>  {
> -	return my_zero_pfn(0) << PAGE_SHIFT;
> +	return zero_pfn(0) << PAGE_SHIFT;
>  }
>  
>  static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
> diff --git a/fs/dax.c b/fs/dax.c
> index 289e6254aa30..b78cff9c91b3 100644
> --- a/fs/dax.c
> +++ b/fs/dax.c
> @@ -1360,7 +1360,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
>  {
>  	struct inode *inode = iter->inode;
>  	unsigned long vaddr = vmf->address;
> -	unsigned long pfn = my_zero_pfn(vaddr);
> +	unsigned long pfn = zero_pfn(vaddr);
>  	vm_fault_t ret;
>  
>  	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
> diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
> index f188bd900eb2..44d15436439f 100644
> --- a/fs/proc/vmcore.c
> +++ b/fs/proc/vmcore.c
> @@ -525,7 +525,7 @@ static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
>  {
>  	unsigned long map_size;
>  	unsigned long pos_start, pos_end, pos;
> -	unsigned long zeropage_pfn = my_zero_pfn(0);
> +	unsigned long zeropage_pfn = zero_pfn(0);
>  	size_t len = 0;
>  
>  	pos_start = pfn;
> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> index 08a88b0d56e5..9bacf4df9769 100644
> --- a/include/linux/pgtable.h
> +++ b/include/linux/pgtable.h
> @@ -1882,27 +1882,39 @@ static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
>  	pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
>  }
>  
> +/*
> + * ZERO_PAGE() is a global shared page (or pages) that is always zero. It is used for
> + * zero-mapped memory areas, CoW etc.
> + *
> + * On architectures that __HAVE_COLOR_ZERO_PAGE there are several such pages
> + * for different ranges in the virtual address space.
> + *
> + * zero_page_pfn identifies the first (or the only) pfn for these pages.
> + */
>  #ifdef __HAVE_COLOR_ZERO_PAGE
>  static inline int is_zero_pfn(unsigned long pfn)
>  {
> -	extern unsigned long zero_pfn;
> -	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
> +	extern unsigned long zero_page_pfn;
> +	unsigned long offset_from_zero_pfn = pfn - zero_page_pfn;
> +
>  	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
>  }
>  
> -#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
> +#define zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
>  
>  #else
>  static inline int is_zero_pfn(unsigned long pfn)
>  {
> -	extern unsigned long zero_pfn;
> -	return pfn == zero_pfn;
> +	extern unsigned long zero_page_pfn;
> +
> +	return pfn == zero_page_pfn;
>  }
>  
> -static inline unsigned long my_zero_pfn(unsigned long addr)
> +static inline unsigned long zero_pfn(unsigned long addr)
>  {
> -	extern unsigned long zero_pfn;
> -	return zero_pfn;
> +	extern unsigned long zero_page_pfn;
> +
> +	return zero_page_pfn;
>  }
>  #endif /* __HAVE_COLOR_ZERO_PAGE */
>  
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 44ff8a648afd..bc15fd152526 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2968,7 +2968,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
>  	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
>  		pte_t entry;
>  
> -		entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
> +		entry = pfn_pte(zero_pfn(addr), vma->vm_page_prot);
>  		entry = pte_mkspecial(entry);
>  		if (pmd_uffd_wp(old_pmd))
>  			entry = pte_mkuffd_wp(entry);
> diff --git a/mm/memory.c b/mm/memory.c
> index 51d2018a387a..ae610afa9cea 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -5165,7 +5165,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
>  	/* Use the zero-page for reads */
>  	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
>  			!mm_forbids_zeropage(vma->vm_mm)) {
> -		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
> +		entry = pte_mkspecial(pfn_pte(zero_pfn(vmf->address),
>  						vma->vm_page_prot));
>  		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>  				vmf->address, &vmf->ptl);
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 1bf2cf8c44dd..739c4e03769b 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -321,7 +321,7 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
>  	if (!pages_identical(page, ZERO_PAGE(0)))
>  		return false;
>  
> -	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
> +	newpte = pte_mkspecial(pfn_pte(zero_pfn(pvmw->address),
>  					pvmw->vma->vm_page_prot));
>  
>  	if (pte_swp_soft_dirty(old_pte))
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index dcf9eff34f83..a0ca236eb4f5 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -53,8 +53,8 @@ EXPORT_SYMBOL(mem_map);
>  void *high_memory;
>  EXPORT_SYMBOL(high_memory);
>  
> -unsigned long zero_pfn __ro_after_init;
> -EXPORT_SYMBOL(zero_pfn);
> +unsigned long zero_page_pfn __ro_after_init;
> +EXPORT_SYMBOL(zero_page_pfn);
>  
>  #ifdef CONFIG_DEBUG_MEMORY_INIT
>  int __meminitdata mminit_loglevel;
> @@ -2670,12 +2670,12 @@ static void __init mem_init_print_info(void)
>  		);
>  }
>  
> -static int __init init_zero_pfn(void)
> +static int __init init_zero_page_pfn(void)
>  {
> -	zero_pfn = page_to_pfn(ZERO_PAGE(0));
> +	zero_page_pfn = page_to_pfn(ZERO_PAGE(0));
>  	return 0;
>  }
> -early_initcall(init_zero_pfn);
> +early_initcall(init_zero_page_pfn);
>  
>  void __init __weak arch_mm_preinit(void)
>  {
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index 927086bb4a3c..e19872e51878 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -357,7 +357,7 @@ static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
>  	if (mm_forbids_zeropage(dst_vma->vm_mm))
>  		return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
>  
> -	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
> +	_dst_pte = pte_mkspecial(pfn_pte(zero_pfn(dst_addr),
>  					 dst_vma->vm_page_prot));
>  	ret = -EAGAIN;
>  	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
> @@ -1229,7 +1229,7 @@ static int move_zeropage_pte(struct mm_struct *mm,
>  		return -EAGAIN;
>  	}
>  
> -	zero_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
> +	zero_pte = pte_mkspecial(pfn_pte(zero_pfn(dst_addr),
>  					 dst_vma->vm_page_prot));
>  	ptep_clear_flush(src_vma, src_addr, src_pte);
>  	set_pte_at(mm, dst_addr, dst_pte, zero_pte);
> -- 
> 2.51.0
> 


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v3 3/4] arch, mm: consolidate empty_zero_page
  2026-02-11 10:31 ` [PATCH v3 3/4] arch, mm: consolidate empty_zero_page Mike Rapoport
                     ` (3 preceding siblings ...)
  2026-02-12  9:04   ` David Hildenbrand (Arm)
@ 2026-02-12 18:38   ` Liam R. Howlett
  4 siblings, 0 replies; 17+ messages in thread
From: Liam R. Howlett @ 2026-02-12 18:38 UTC (permalink / raw)
  To: Mike Rapoport
  Cc: Andrew Morton, Andreas Larsson, Borislav Petkov, Brian Cain,
	Catalin Marinas, Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, David Hildenbrand, Dinh Nguyen,
	Geert Uytterhoeven, Guo Ren, Helge Deller, Huacai Chen,
	Ingo Molnar, Johannes Berg, John Paul Adrian Glaubitz,
	Lorenzo Stoakes, Madhavan Srinivasan, Magnus Lindholm,
	Matt Turner, Max Filippov, Michael Ellerman, Michal Hocko,
	Michal Simek, Palmer Dabbelt, Richard Weinberger, Russell King,
	Stafford Horne, Suren Baghdasaryan, Thomas Gleixner,
	Vineet Gupta, Vlastimil Babka, Will Deacon, linux-alpha,
	linux-kernel, linux-snps-arc, linux-arm-kernel, linux-csky,
	linux-hexagon, loongarch, linux-m68k, linux-openrisc,
	linux-parisc, linuxppc-dev, linux-riscv, linux-sh, sparclinux,
	linux-um, linux-mm, x86

* Mike Rapoport <rppt@kernel.org> [260211 05:32]:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> 
> Reduce 22 declarations of empty_zero_page to 3 and 23 declarations of
> ZERO_PAGE() to 4.
> 
> Every architecture defines empty_zero_page one way or another, but for the
> most of them it is always a page aligned page in BSS and most definitions
> of ZERO_PAGE do virt_to_page(empty_zero_page).
> 
> Move Linus vetted x86 definition of empty_zero_page and ZERO_PAGE() to the
> core MM and drop these definitions in architectures that do not implement
> colored zero page (MIPS and s390).
> 
> ZERO_PAGE() remains a macro because turning it to a wrapper for a static
> inline causes severe pain in header dependencies.
> 
> For the most part the change is mechanical, with these being noteworthy:
> 
> * alpha: aliased empty_zero_page with ZERO_PGE that was also used for boot
>   parameters. Switching to a generic empty_zero_page removes the aliasing
>   and keeps ZERO_PGE for boot parameters only
> * arm64: uses __pa_symbol() in ZERO_PAGE() so that definition of
>   ZERO_PAGE() is kept intact.
> * m68k/parisc/um: allocated empty_zero_page from memblock,
>   although they do not support zero page coloring and having it in BSS
>   will work fine.
> * sparc64 can have empty_zero_page in BSS rather than allocate it, but it
>   can't use virt_to_page() for BSS. Keep its definition of ZERO_PAGE()
>   but instead of allocating it, make mem_map_zero point to
>   empty_zero_page.
> * sh: used empty_zero_page for boot parameters at the very early boot.
>   Rename the parameters page to boot_params_page and let sh use the generic
>   empty_zero_page.
> * hexagon: had an amusing comment about empty_zero_page
> 
> 	/* A handy thing to have if one has the RAM. Declared in head.S */
> 
>   that unfortunately had to go :)
> 
> Acked-by: Helge Deller <deller@gmx.de>   # parisc
> Tested-by: Helge Deller <deller@gmx.de>  # parisc
> Reviewed-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
> Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>

Acked-by: Liam R. Howlett <Liam.Howlett@oracle.com>

> ---
>  arch/alpha/include/asm/pgtable.h          |  6 ------
>  arch/arc/include/asm/pgtable.h            |  3 ---
>  arch/arc/mm/init.c                        |  2 --
>  arch/arm/include/asm/pgtable.h            |  9 ---------
>  arch/arm/mm/mmu.c                         |  7 -------
>  arch/arm/mm/nommu.c                       |  7 -------
>  arch/arm64/include/asm/pgtable.h          |  1 -
>  arch/arm64/mm/mmu.c                       |  7 -------
>  arch/csky/include/asm/pgtable.h           |  3 ---
>  arch/csky/mm/init.c                       |  3 ---
>  arch/hexagon/include/asm/pgtable.h        |  6 ------
>  arch/hexagon/kernel/head.S                |  5 -----
>  arch/hexagon/kernel/hexagon_ksyms.c       |  1 -
>  arch/loongarch/include/asm/pgtable.h      |  9 ---------
>  arch/loongarch/mm/init.c                  |  3 ---
>  arch/m68k/include/asm/pgtable_mm.h        |  9 ---------
>  arch/m68k/include/asm/pgtable_no.h        |  7 -------
>  arch/m68k/mm/init.c                       |  9 ---------
>  arch/m68k/mm/mcfmmu.c                     |  2 --
>  arch/m68k/mm/motorola.c                   |  6 ------
>  arch/m68k/mm/sun3mmu.c                    |  2 --
>  arch/microblaze/include/asm/pgtable.h     | 10 ----------
>  arch/microblaze/kernel/head.S             |  4 ----
>  arch/microblaze/kernel/microblaze_ksyms.c |  2 --
>  arch/nios2/include/asm/pgtable.h          |  7 -------
>  arch/nios2/kernel/head.S                  | 10 ----------
>  arch/nios2/kernel/nios2_ksyms.c           |  1 -
>  arch/openrisc/include/asm/pgtable.h       |  4 ----
>  arch/openrisc/kernel/head.S               |  3 ---
>  arch/openrisc/kernel/or32_ksyms.c         |  1 -
>  arch/openrisc/mm/init.c                   |  3 ---
>  arch/parisc/include/asm/pgtable.h         | 11 -----------
>  arch/parisc/mm/init.c                     |  6 ------
>  arch/powerpc/include/asm/pgtable.h        |  6 ------
>  arch/powerpc/mm/mem.c                     |  3 ---
>  arch/riscv/include/asm/pgtable.h          |  7 -------
>  arch/riscv/mm/init.c                      |  4 ----
>  arch/sh/include/asm/pgtable.h             |  8 --------
>  arch/sh/include/asm/setup.h               |  3 ++-
>  arch/sh/kernel/head_32.S                  |  4 ++--
>  arch/sh/kernel/sh_ksyms_32.c              |  1 -
>  arch/sh/mm/init.c                         |  1 -
>  arch/sparc/include/asm/pgtable_32.h       |  8 --------
>  arch/sparc/include/asm/setup.h            |  2 --
>  arch/sparc/kernel/head_32.S               |  7 -------
>  arch/sparc/mm/init_32.c                   |  4 ----
>  arch/sparc/mm/init_64.c                   | 11 ++++-------
>  arch/um/include/asm/pgtable.h             |  9 ---------
>  arch/um/include/shared/kern_util.h        |  1 -
>  arch/um/kernel/mem.c                      | 16 ----------------
>  arch/um/kernel/um_arch.c                  |  1 -
>  arch/x86/include/asm/pgtable.h            |  8 --------
>  arch/x86/kernel/head_32.S                 |  4 ----
>  arch/x86/kernel/head_64.S                 |  7 -------
>  arch/xtensa/include/asm/pgtable.h         |  4 ----
>  arch/xtensa/kernel/head.S                 |  3 ---
>  arch/xtensa/kernel/xtensa_ksyms.c         |  2 --
>  include/linux/pgtable.h                   | 10 ++++++++++
>  mm/mm_init.c                              |  5 +++++
>  59 files changed, 23 insertions(+), 285 deletions(-)
> 
> diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
> index 90e7a9539102..12a3c5f8ece8 100644
> --- a/arch/alpha/include/asm/pgtable.h
> +++ b/arch/alpha/include/asm/pgtable.h
> @@ -125,12 +125,6 @@ struct vm_area_struct;
>   */
>  #define pgprot_noncached(prot)	(prot)
>  
> -/*
> - * ZERO_PAGE is a global shared page that is always zero:  used
> - * for zero-mapped memory areas etc..
> - */
> -#define ZERO_PAGE(vaddr)	(virt_to_page(ZERO_PGE))
> -
>  /*
>   * On certain platforms whose physical address space can overlap KSEG,
>   * namely EV6 and above, we must re-twiddle the physaddr to restore the
> diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
> index bd580e2b62d7..0fdaea81b5fa 100644
> --- a/arch/arc/include/asm/pgtable.h
> +++ b/arch/arc/include/asm/pgtable.h
> @@ -21,9 +21,6 @@
>  
>  #ifndef __ASSEMBLER__
>  
> -extern char empty_zero_page[PAGE_SIZE];
> -#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
> -
>  extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
>  
>  /* to cope with aliasing VIPT cache */
> diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
> index a5e92f46e5d1..d6b5c27a0098 100644
> --- a/arch/arc/mm/init.c
> +++ b/arch/arc/mm/init.c
> @@ -19,8 +19,6 @@
>  #include <asm/arcregs.h>
>  
>  pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
> -char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
> -EXPORT_SYMBOL(empty_zero_page);
>  
>  static const unsigned long low_mem_start = CONFIG_LINUX_RAM_BASE;
>  static unsigned long low_mem_sz;
> diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
> index 6fa9acd6a7f5..982795cf4563 100644
> --- a/arch/arm/include/asm/pgtable.h
> +++ b/arch/arm/include/asm/pgtable.h
> @@ -10,15 +10,6 @@
>  #include <linux/const.h>
>  #include <asm/proc-fns.h>
>  
> -#ifndef __ASSEMBLY__
> -/*
> - * ZERO_PAGE is a global shared page that is always zero: used
> - * for zero-mapped memory areas etc..
> - */
> -extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
> -#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
> -#endif
> -
>  #include <asm-generic/pgtable-nopud.h>
>  
>  #ifndef CONFIG_MMU
> diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
> index 518def8314e7..23b87b5ef7f1 100644
> --- a/arch/arm/mm/mmu.c
> +++ b/arch/arm/mm/mmu.c
> @@ -41,13 +41,6 @@
>  
>  extern unsigned long __atags_pointer;
>  
> -/*
> - * empty_zero_page is a special page that is used for
> - * zero-initialized data and COW.
> - */
> -unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
> -EXPORT_SYMBOL(empty_zero_page);
> -
>  /*
>   * The pmd table for the upper-most set of pages.
>   */
> diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
> index 7e42d8accec6..040ea43cce32 100644
> --- a/arch/arm/mm/nommu.c
> +++ b/arch/arm/mm/nommu.c
> @@ -27,13 +27,6 @@
>  
>  unsigned long vectors_base;
>  
> -/*
> - * empty_zero_page is a special page that is used for
> - * zero-initialized data and COW.
> - */
> -unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
> -EXPORT_SYMBOL(empty_zero_page);
> -
>  #ifdef CONFIG_ARM_MPU
>  struct mpu_rgn_info mpu_rgn_info;
>  #endif
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index d94445b4f3df..63da07398a30 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -110,7 +110,6 @@ static inline void arch_leave_lazy_mmu_mode(void)
>   * ZERO_PAGE is a global shared page that is always zero: used
>   * for zero-mapped memory areas etc..
>   */
> -extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
>  #define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))
>  
>  #define pte_ERROR(e)	\
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index a6a00accf4f9..0f6a171faf1f 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -64,13 +64,6 @@ static bool rodata_is_rw __ro_after_init = true;
>   */
>  long __section(".mmuoff.data.write") __early_cpu_boot_status;
>  
> -/*
> - * Empty_zero_page is a special page that is used for zero-initialized data
> - * and COW.
> - */
> -unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
> -EXPORT_SYMBOL(empty_zero_page);
> -
>  static DEFINE_SPINLOCK(swapper_pgdir_lock);
>  static DEFINE_MUTEX(fixmap_lock);
>  
> diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
> index d606afbabce1..bafcd5823531 100644
> --- a/arch/csky/include/asm/pgtable.h
> +++ b/arch/csky/include/asm/pgtable.h
> @@ -76,9 +76,6 @@
>  #define MAX_SWAPFILES_CHECK() \
>  		BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)
>  
> -extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
> -#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
> -
>  extern void load_pgd(unsigned long pg_dir);
>  extern pte_t invalid_pte_table[PTRS_PER_PTE];
>  
> diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
> index 573da66b2543..fa16015ea1c0 100644
> --- a/arch/csky/mm/init.c
> +++ b/arch/csky/mm/init.c
> @@ -38,9 +38,6 @@ pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
>  pte_t kernel_pte_tables[PTRS_KERN_TABLE] __page_aligned_bss;
>  
>  EXPORT_SYMBOL(invalid_pte_table);
> -unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
> -						__page_aligned_bss;
> -EXPORT_SYMBOL(empty_zero_page);
>  
>  void free_initmem(void)
>  {
> diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
> index fbf24d1d1ca6..27b269e2870d 100644
> --- a/arch/hexagon/include/asm/pgtable.h
> +++ b/arch/hexagon/include/asm/pgtable.h
> @@ -14,9 +14,6 @@
>  #include <asm/page.h>
>  #include <asm-generic/pgtable-nopmd.h>
>  
> -/* A handy thing to have if one has the RAM. Declared in head.S */
> -extern unsigned long empty_zero_page;
> -
>  /*
>   * The PTE model described here is that of the Hexagon Virtual Machine,
>   * which autonomously walks 2-level page tables.  At a lower level, we
> @@ -348,9 +345,6 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
>  	return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
>  }
>  
> -/* ZERO_PAGE - returns the globally shared zero page */
> -#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
> -
>  /*
>   * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
>   * are !pte_none() && !pte_present().
> diff --git a/arch/hexagon/kernel/head.S b/arch/hexagon/kernel/head.S
> index 0b016308cc79..908ffece9132 100644
> --- a/arch/hexagon/kernel/head.S
> +++ b/arch/hexagon/kernel/head.S
> @@ -216,8 +216,3 @@ __head_s_vaddr_target:
>  .p2align PAGE_SHIFT
>  ENTRY(external_cmdline_buffer)
>          .fill _PAGE_SIZE,1,0
> -
> -.data
> -.p2align PAGE_SHIFT
> -ENTRY(empty_zero_page)
> -        .fill _PAGE_SIZE,1,0
> diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c
> index 36a80e31d187..81bc6f81e200 100644
> --- a/arch/hexagon/kernel/hexagon_ksyms.c
> +++ b/arch/hexagon/kernel/hexagon_ksyms.c
> @@ -17,7 +17,6 @@ EXPORT_SYMBOL(raw_copy_to_user);
>  EXPORT_SYMBOL(__vmgetie);
>  EXPORT_SYMBOL(__vmsetie);
>  EXPORT_SYMBOL(__vmyield);
> -EXPORT_SYMBOL(empty_zero_page);
>  EXPORT_SYMBOL(memcpy);
>  EXPORT_SYMBOL(memset);
>  
> diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
> index c33b3bcb733e..a244de27a03e 100644
> --- a/arch/loongarch/include/asm/pgtable.h
> +++ b/arch/loongarch/include/asm/pgtable.h
> @@ -74,15 +74,6 @@
>  struct mm_struct;
>  struct vm_area_struct;
>  
> -/*
> - * ZERO_PAGE is a global shared page that is always zero; used
> - * for zero-mapped memory areas etc..
> - */
> -
> -extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
> -
> -#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)
> -
>  #ifdef CONFIG_32BIT
>  
>  #define VMALLOC_START	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
> diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
> index c331bf69d2ec..00f3822b6e47 100644
> --- a/arch/loongarch/mm/init.c
> +++ b/arch/loongarch/mm/init.c
> @@ -36,9 +36,6 @@
>  #include <asm/pgalloc.h>
>  #include <asm/tlb.h>
>  
> -unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
> -EXPORT_SYMBOL(empty_zero_page);
> -
>  void copy_user_highpage(struct page *to, struct page *from,
>  	unsigned long vaddr, struct vm_area_struct *vma)
>  {
> diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
> index bba64a9c49ac..7501ff030c63 100644
> --- a/arch/m68k/include/asm/pgtable_mm.h
> +++ b/arch/m68k/include/asm/pgtable_mm.h
> @@ -110,15 +110,6 @@ extern unsigned long m68k_vmalloc_end;
>  #define VMALLOC_END KMAP_START
>  #endif
>  
> -/* zero page used for uninitialized stuff */
> -extern void *empty_zero_page;
> -
> -/*
> - * ZERO_PAGE is a global shared page that is always zero: used
> - * for zero-mapped memory areas etc..
> - */
> -#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
> -
>  extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);
>  
>  /*
> diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h
> index 1a86c15b9008..11751807a3f3 100644
> --- a/arch/m68k/include/asm/pgtable_no.h
> +++ b/arch/m68k/include/asm/pgtable_no.h
> @@ -30,13 +30,6 @@
>  
>  #define swapper_pg_dir ((pgd_t *) 0)
>  
> -/*
> - * ZERO_PAGE is a global shared page that is always zero: used
> - * for zero-mapped memory areas etc..
> - */
> -extern void *empty_zero_page;
> -#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
> -
>  /*
>   * All 32bit addresses are effectively valid for vmalloc...
>   * Sort of meaningless for non-VM targets.
> diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
> index 53b71f786c27..3b88c0dd1616 100644
> --- a/arch/m68k/mm/init.c
> +++ b/arch/m68k/mm/init.c
> @@ -33,13 +33,6 @@
>  #include <asm/sections.h>
>  #include <asm/tlb.h>
>  
> -/*
> - * ZERO_PAGE is a special page that is used for zero-initialized
> - * data and COW.
> - */
> -void *empty_zero_page;
> -EXPORT_SYMBOL(empty_zero_page);
> -
>  void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
>  {
>  	max_zone_pfns[ZONE_DMA] = PFN_DOWN(memblock_end_of_DRAM());
> @@ -71,8 +64,6 @@ void __init paging_init(void)
>  	unsigned long end_mem = memory_end & PAGE_MASK;
>  
>  	high_memory = (void *) end_mem;
> -
> -	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
>  }
>  
>  #endif /* CONFIG_MMU */
> diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
> index 3418fd864237..4924f2ff8ef8 100644
> --- a/arch/m68k/mm/mcfmmu.c
> +++ b/arch/m68k/mm/mcfmmu.c
> @@ -41,8 +41,6 @@ void __init paging_init(void)
>  	unsigned long next_pgtable;
>  	int i;
>  
> -	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
> -
>  	pg_dir = swapper_pg_dir;
>  	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
>  
> diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
> index 127a3fa69f4c..b30aa69a73a6 100644
> --- a/arch/m68k/mm/motorola.c
> +++ b/arch/m68k/mm/motorola.c
> @@ -498,12 +498,6 @@ void __init paging_init(void)
>  
>  	early_memtest(min_addr, max_addr);
>  
> -	/*
> -	 * initialize the bad page table and bad page to point
> -	 * to a couple of allocated pages
> -	 */
> -	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
> -
>  	/*
>  	 * Set up SFC/DFC registers
>  	 */
> diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
> index c801677f7df8..f139cc15753a 100644
> --- a/arch/m68k/mm/sun3mmu.c
> +++ b/arch/m68k/mm/sun3mmu.c
> @@ -43,8 +43,6 @@ void __init paging_init(void)
>  	unsigned long bootmem_end;
>  	unsigned long size;
>  
> -	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
> -
>  	address = PAGE_OFFSET;
>  	pg_dir = swapper_pg_dir;
>  	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
> diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
> index 4eb76de6be4a..ea72291de553 100644
> --- a/arch/microblaze/include/asm/pgtable.h
> +++ b/arch/microblaze/include/asm/pgtable.h
> @@ -207,16 +207,6 @@ extern pte_t *va_to_pte(unsigned long address);
>   * Also, write permissions imply read permissions.
>   */
>  
> -#ifndef __ASSEMBLER__
> -/*
> - * ZERO_PAGE is a global shared page that is always zero: used
> - * for zero-mapped memory areas etc..
> - */
> -extern unsigned long empty_zero_page[1024];
> -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
> -
> -#endif /* __ASSEMBLER__ */
> -
>  #define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
>  #define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
>  #define pte_clear(mm, addr, ptep) \
> diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
> index ec2fcb545e64..808019c3b7ac 100644
> --- a/arch/microblaze/kernel/head.S
> +++ b/arch/microblaze/kernel/head.S
> @@ -39,10 +39,6 @@
>  #include <asm/processor.h>
>  
>  .section .data
> -.global empty_zero_page
> -.align 12
> -empty_zero_page:
> -	.space	PAGE_SIZE
>  .global swapper_pg_dir
>  swapper_pg_dir:
>  	.space	PAGE_SIZE
> diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
> index a8553f54152b..ad7596d7ba07 100644
> --- a/arch/microblaze/kernel/microblaze_ksyms.c
> +++ b/arch/microblaze/kernel/microblaze_ksyms.c
> @@ -33,8 +33,6 @@ EXPORT_SYMBOL(memcpy);
>  EXPORT_SYMBOL(memmove);
>  #endif
>  
> -EXPORT_SYMBOL(empty_zero_page);
> -
>  EXPORT_SYMBOL(mbc);
>  
>  extern void __divsi3(void);
> diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
> index 844dce55569f..d389aa9ca57c 100644
> --- a/arch/nios2/include/asm/pgtable.h
> +++ b/arch/nios2/include/asm/pgtable.h
> @@ -65,13 +65,6 @@ struct mm_struct;
>  #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
>  #define PGDIR_MASK	(~(PGDIR_SIZE-1))
>  
> -/*
> - * ZERO_PAGE is a global shared page that is always zero: used
> - * for zero-mapped memory areas etc..
> - */
> -extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
> -#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
> -
>  extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
>  extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
>  
> diff --git a/arch/nios2/kernel/head.S b/arch/nios2/kernel/head.S
> index 372ce4a33018..613212e1a63a 100644
> --- a/arch/nios2/kernel/head.S
> +++ b/arch/nios2/kernel/head.S
> @@ -23,16 +23,6 @@
>  #include <asm/asm-offsets.h>
>  #include <asm/asm-macros.h>
>  
> -/*
> - * ZERO_PAGE is a special page that is used for zero-initialized
> - * data and COW.
> - */
> -.data
> -.global empty_zero_page
> -.align 12
> -empty_zero_page:
> -	.space	PAGE_SIZE
> -
>  /*
>   * This global variable is used as an extension to the nios'
>   * STATUS register to emulate a user/supervisor mode.
> diff --git a/arch/nios2/kernel/nios2_ksyms.c b/arch/nios2/kernel/nios2_ksyms.c
> index 54f7b23df1bf..c40aa39e8658 100644
> --- a/arch/nios2/kernel/nios2_ksyms.c
> +++ b/arch/nios2/kernel/nios2_ksyms.c
> @@ -20,7 +20,6 @@ EXPORT_SYMBOL(memmove);
>  
>  /* memory management */
>  
> -EXPORT_SYMBOL(empty_zero_page);
>  EXPORT_SYMBOL(flush_icache_range);
>  
>  /*
> diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
> index b218050e2f6d..6b89996d0b62 100644
> --- a/arch/openrisc/include/asm/pgtable.h
> +++ b/arch/openrisc/include/asm/pgtable.h
> @@ -179,10 +179,6 @@ extern void paging_init(void);
>  	__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
>  		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI)
>  
> -/* zero page used for uninitialized stuff */
> -extern unsigned long empty_zero_page[2048];
> -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
> -
>  #define pte_none(x)	(!pte_val(x))
>  #define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
>  #define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)
> diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
> index bd760066f1cd..45890393947d 100644
> --- a/arch/openrisc/kernel/head.S
> +++ b/arch/openrisc/kernel/head.S
> @@ -1563,9 +1563,6 @@ _string_nl:
>   */
>  	.section .data,"aw"
>  	.align	8192
> -	.global  empty_zero_page
> -empty_zero_page:
> -	.space  8192
>  
>  	.global  swapper_pg_dir
>  swapper_pg_dir:
> diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c
> index 212e5f85004c..84a937a64e2a 100644
> --- a/arch/openrisc/kernel/or32_ksyms.c
> +++ b/arch/openrisc/kernel/or32_ksyms.c
> @@ -40,7 +40,6 @@ DECLARE_EXPORT(__ashldi3);
>  DECLARE_EXPORT(__lshrdi3);
>  DECLARE_EXPORT(__ucmpdi2);
>  
> -EXPORT_SYMBOL(empty_zero_page);
>  EXPORT_SYMBOL(__copy_tofrom_user);
>  EXPORT_SYMBOL(__clear_user);
>  EXPORT_SYMBOL(memset);
> diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
> index 78fb0734cdbc..89d8c6df8855 100644
> --- a/arch/openrisc/mm/init.c
> +++ b/arch/openrisc/mm/init.c
> @@ -188,9 +188,6 @@ void __init mem_init(void)
>  {
>  	BUG_ON(!mem_map);
>  
> -	/* clear the zero-page */
> -	memset((void *)empty_zero_page, 0, PAGE_SIZE);
> -
>  	printk("mem_init_done ...........................................\n");
>  	mem_init_done = 1;
>  	return;
> diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
> index 2c139a4dbf4b..cbdc01a26ea0 100644
> --- a/arch/parisc/include/asm/pgtable.h
> +++ b/arch/parisc/include/asm/pgtable.h
> @@ -262,17 +262,6 @@ extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */
>  
>  extern pte_t pg0[];
>  
> -/* zero page used for uninitialized stuff */
> -
> -extern unsigned long *empty_zero_page;
> -
> -/*
> - * ZERO_PAGE is a global shared page that is always zero: used
> - * for zero-mapped memory areas etc..
> - */
> -
> -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
> -
>  #define pte_none(x)     (pte_val(x) == 0)
>  #define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
>  #define pte_user(x)	(pte_val(x) & _PAGE_USER)
> diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
> index 6a39e031e5ff..be3380c9bcda 100644
> --- a/arch/parisc/mm/init.c
> +++ b/arch/parisc/mm/init.c
> @@ -604,9 +604,6 @@ void __init mem_init(void)
>  #endif
>  }
>  
> -unsigned long *empty_zero_page __ro_after_init;
> -EXPORT_SYMBOL(empty_zero_page);
> -
>  /*
>   * pagetable_init() sets up the page tables
>   *
> @@ -639,9 +636,6 @@ static void __init pagetable_init(void)
>  			  initrd_end - initrd_start, PAGE_KERNEL, 0);
>  	}
>  #endif
> -
> -	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
> -
>  }
>  
>  static void __init gateway_init(void)
> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
> index dcd3a88caaf6..b27d94c06d0e 100644
> --- a/arch/powerpc/include/asm/pgtable.h
> +++ b/arch/powerpc/include/asm/pgtable.h
> @@ -76,12 +76,6 @@ static inline const void *pmd_page_vaddr(pmd_t pmd)
>  }
>  #define pmd_page_vaddr pmd_page_vaddr
>  #endif
> -/*
> - * ZERO_PAGE is a global shared page that is always zero: used
> - * for zero-mapped memory areas etc..
> - */
> -extern unsigned long empty_zero_page[];
> -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
>  
>  extern pgd_t swapper_pg_dir[];
>  
> diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
> index 3789a51bdaae..85508392a6b6 100644
> --- a/arch/powerpc/mm/mem.c
> +++ b/arch/powerpc/mm/mem.c
> @@ -34,9 +34,6 @@
>  
>  unsigned long long memory_limit __initdata;
>  
> -unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
> -EXPORT_SYMBOL(empty_zero_page);
> -
>  pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
>  				pgprot_t vma_prot)
>  {
> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> index 9ecbf0366719..a6b496f4944f 100644
> --- a/arch/riscv/include/asm/pgtable.h
> +++ b/arch/riscv/include/asm/pgtable.h
> @@ -1258,13 +1258,6 @@ extern u64 satp_mode;
>  void paging_init(void);
>  void misc_mem_init(void);
>  
> -/*
> - * ZERO_PAGE is a global shared page that is always zero,
> - * used for zero-mapped memory areas, etc.
> - */
> -extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
> -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
> -
>  /*
>   * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
>   * TLB flush will be required as a result of the "set". For example, use
> diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> index 848efeb9e163..95ac79c62067 100644
> --- a/arch/riscv/mm/init.c
> +++ b/arch/riscv/mm/init.c
> @@ -69,10 +69,6 @@ unsigned long vmemmap_start_pfn __ro_after_init;
>  EXPORT_SYMBOL(vmemmap_start_pfn);
>  #endif
>  
> -unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
> -							__page_aligned_bss;
> -EXPORT_SYMBOL(empty_zero_page);
> -
>  extern char _start[];
>  void *_dtb_early_va __initdata;
>  uintptr_t _dtb_early_pa __initdata;
> diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
> index 10fa8f2bb8d1..d5ce0950a323 100644
> --- a/arch/sh/include/asm/pgtable.h
> +++ b/arch/sh/include/asm/pgtable.h
> @@ -20,14 +20,6 @@
>  #ifndef __ASSEMBLER__
>  #include <asm/addrspace.h>
>  #include <asm/fixmap.h>
> -
> -/*
> - * ZERO_PAGE is a global shared page that is always zero: used
> - * for zero-mapped memory areas etc..
> - */
> -extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
> -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
> -
>  #endif /* !__ASSEMBLER__ */
>  
>  /*
> diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h
> index 84bb23a771f3..63c9efc06348 100644
> --- a/arch/sh/include/asm/setup.h
> +++ b/arch/sh/include/asm/setup.h
> @@ -7,7 +7,8 @@
>  /*
>   * This is set up by the setup-routine at boot-time
>   */
> -#define PARAM	((unsigned char *)empty_zero_page)
> +extern unsigned char *boot_params_page;
> +#define PARAM boot_params_page
>  
>  #define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
>  #define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004))
> diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
> index b603b7968b38..0b91bb85d40a 100644
> --- a/arch/sh/kernel/head_32.S
> +++ b/arch/sh/kernel/head_32.S
> @@ -26,7 +26,7 @@
>  #endif
>  
>  	.section	.empty_zero_page, "aw"
> -ENTRY(empty_zero_page)
> +ENTRY(boot_params_page)
>  	.long	1		/* MOUNT_ROOT_RDONLY */
>  	.long	0		/* RAMDISK_FLAGS */
>  	.long	0x0200		/* ORIG_ROOT_DEV */
> @@ -39,7 +39,7 @@ ENTRY(empty_zero_page)
>  	.long	0x53453f00 + 29	/* "SE?" = 29 bit */
>  #endif
>  1:
> -	.skip	PAGE_SIZE - empty_zero_page - 1b
> +	.skip	PAGE_SIZE - boot_params_page - 1b
>  
>  	__HEAD
>  
> diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
> index 5858936cb431..041191002e2e 100644
> --- a/arch/sh/kernel/sh_ksyms_32.c
> +++ b/arch/sh/kernel/sh_ksyms_32.c
> @@ -20,7 +20,6 @@ EXPORT_SYMBOL(csum_partial);
>  EXPORT_SYMBOL(csum_partial_copy_generic);
>  EXPORT_SYMBOL(copy_page);
>  EXPORT_SYMBOL(__clear_user);
> -EXPORT_SYMBOL(empty_zero_page);
>  #ifdef CONFIG_FLATMEM
>  /* need in pfn_valid macro */
>  EXPORT_SYMBOL(min_low_pfn);
> diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
> index 464a3a63e2fa..4e40d5e96be9 100644
> --- a/arch/sh/mm/init.c
> +++ b/arch/sh/mm/init.c
> @@ -332,7 +332,6 @@ void __init mem_init(void)
>  	cpu_cache_init();
>  
>  	/* clear the zero-page */
> -	memset(empty_zero_page, 0, PAGE_SIZE);
>  	__flush_wback_region(empty_zero_page, PAGE_SIZE);
>  
>  	vsyscall_init();
> diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
> index a9f802d1dd64..f89b1250661d 100644
> --- a/arch/sparc/include/asm/pgtable_32.h
> +++ b/arch/sparc/include/asm/pgtable_32.h
> @@ -71,14 +71,6 @@ extern unsigned long ptr_in_current_pgd;
>  extern unsigned long phys_base;
>  extern unsigned long pfn_base;
>  
> -/*
> - * ZERO_PAGE is a global shared page that is always zero: used
> - * for zero-mapped memory areas etc..
> - */
> -extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
> -
> -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
> -
>  /*
>   * In general all page table modifications should use the V8 atomic
>   * swap instruction.  This insures the mmu and the cpu are in sync
> diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
> index 72205684e51e..21bed5514028 100644
> --- a/arch/sparc/include/asm/setup.h
> +++ b/arch/sparc/include/asm/setup.h
> @@ -17,8 +17,6 @@ extern char reboot_command[];
>   */
>  extern unsigned char boot_cpu_id;
>  
> -extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
> -
>  extern int serial_console;
>  static inline int con_is_present(void)
>  {
> diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
> index 38345460d542..8c320fa25a67 100644
> --- a/arch/sparc/kernel/head_32.S
> +++ b/arch/sparc/kernel/head_32.S
> @@ -57,13 +57,6 @@ sun4e_notsup:
>  
>  	.align PAGE_SIZE
>  
> -/* This was the only reasonable way I could think of to properly align
> - * these page-table data structures.
> - */
> -	.globl empty_zero_page
> -empty_zero_page:	.skip PAGE_SIZE
> -EXPORT_SYMBOL(empty_zero_page)
> -
>  	.global root_flags
>  	.global ram_flags
>  	.global root_dev
> diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
> index fdc93dd12c3e..e0e66f91ceeb 100644
> --- a/arch/sparc/mm/init_32.c
> +++ b/arch/sparc/mm/init_32.c
> @@ -246,10 +246,6 @@ void __init arch_mm_preinit(void)
>  		prom_halt();
>  	}
>  
> -
> -	/* Saves us work later. */
> -	memset((void *)empty_zero_page, 0, PAGE_SIZE);
> -
>  	i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
>  	i += 1;
>  	sparc_valid_addr_bitmap = (unsigned long *)
> diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
> index 4f7bdb18774b..0cc8de2fea90 100644
> --- a/arch/sparc/mm/init_64.c
> +++ b/arch/sparc/mm/init_64.c
> @@ -2498,6 +2498,9 @@ static void __init register_page_bootmem_info(void)
>  }
>  void __init mem_init(void)
>  {
> +	phys_addr_t zero_page_pa = kern_base +
> +		((unsigned long)&empty_zero_page[0] - KERNBASE);
> +
>  	/*
>  	 * Must be done after boot memory is put on freelist, because here we
>  	 * might set fields in deferred struct pages that have not yet been
> @@ -2510,13 +2513,7 @@ void __init mem_init(void)
>  	 * Set up the zero page, mark it reserved, so that page count
>  	 * is not manipulated when freeing the page from user ptes.
>  	 */
> -	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
> -	if (mem_map_zero == NULL) {
> -		prom_printf("paging_init: Cannot alloc zero page.\n");
> -		prom_halt();
> -	}
> -	mark_page_reserved(mem_map_zero);
> -
> +	mem_map_zero = pfn_to_page(PHYS_PFN(zero_page_pa));
>  
>  	if (tlb_type == cheetah || tlb_type == cheetah_plus)
>  		cheetah_ecache_flush_init();
> diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
> index 3b42b0f45bf6..19e0608fb649 100644
> --- a/arch/um/include/asm/pgtable.h
> +++ b/arch/um/include/asm/pgtable.h
> @@ -34,9 +34,6 @@
>  
>  extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
>  
> -/* zero page used for uninitialized stuff */
> -extern unsigned long *empty_zero_page;
> -
>  /* Just any arbitrary offset to the start of the vmalloc VM area: the
>   * current 8MB value just means that there will be a 8MB "hole" after the
>   * physical memory until the kernel virtual memory starts.  That means that
> @@ -74,12 +71,6 @@ extern unsigned long *empty_zero_page;
>   * get..
>   */
>  
> -/*
> - * ZERO_PAGE is a global shared page that is always zero: used
> - * for zero-mapped memory areas etc..
> - */
> -#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
> -
>  #define pte_clear(mm, addr, xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEEDSYNC))
>  
>  #define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEEDSYNC))
> diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
> index 38321188c04c..9812efd14ec0 100644
> --- a/arch/um/include/shared/kern_util.h
> +++ b/arch/um/include/shared/kern_util.h
> @@ -38,7 +38,6 @@ extern void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs
>  extern void uml_pm_wake(void);
>  
>  extern int start_uml(void);
> -extern void paging_init(void);
>  
>  extern void uml_cleanup(void);
>  extern void do_uml_exitcalls(void);
> diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
> index 89c8c8b94a79..1eef0e42ef5d 100644
> --- a/arch/um/kernel/mem.c
> +++ b/arch/um/kernel/mem.c
> @@ -44,10 +44,6 @@ __section(".kasan_init") __used
>  = kasan_init;
>  #endif
>  
> -/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
> -unsigned long *empty_zero_page = NULL;
> -EXPORT_SYMBOL(empty_zero_page);
> -
>  /*
>   * Initialized during boot, and readonly for initializing page tables
>   * afterwards
> @@ -65,9 +61,6 @@ void __init arch_mm_preinit(void)
>  	/* Safe to call after jump_label_init(). Enables KASAN. */
>  	kasan_init_generic();
>  
> -	/* clear the zero-page */
> -	memset(empty_zero_page, 0, PAGE_SIZE);
> -
>  	/* Map in the area just after the brk now that kmalloc is about
>  	 * to be turned on.
>  	 */
> @@ -89,15 +82,6 @@ void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
>  	max_zone_pfns[ZONE_NORMAL] = high_physmem >> PAGE_SHIFT;
>  }
>  
> -void __init paging_init(void)
> -{
> -	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
> -							       PAGE_SIZE);
> -	if (!empty_zero_page)
> -		panic("%s: Failed to allocate %lu bytes align=%lx\n",
> -		      __func__, PAGE_SIZE, PAGE_SIZE);
> -}
> -
>  /*
>   * This can't do anything because nothing in the kernel image can be freed
>   * since it's not in kernel physical memory.
> diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
> index e2b24e1ecfa6..2141f5f1f5a2 100644
> --- a/arch/um/kernel/um_arch.c
> +++ b/arch/um/kernel/um_arch.c
> @@ -413,7 +413,6 @@ void __init setup_arch(char **cmdline_p)
>  	uml_dtb_init();
>  	read_initrd();
>  
> -	paging_init();
>  	strscpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
>  	*cmdline_p = command_line;
>  	setup_hostinfo(host_info, sizeof host_info);
> diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
> index 1662c5a8f445..54289f4587a4 100644
> --- a/arch/x86/include/asm/pgtable.h
> +++ b/arch/x86/include/asm/pgtable.h
> @@ -47,14 +47,6 @@ void ptdump_walk_user_pgd_level_checkwx(void);
>  #define debug_checkwx_user()	do { } while (0)
>  #endif
>  
> -/*
> - * ZERO_PAGE is a global shared page that is always zero: used
> - * for zero-mapped memory areas etc..
> - */
> -extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
> -	__visible;
> -#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
> -
>  extern spinlock_t pgd_lock;
>  extern struct list_head pgd_list;
>  
> diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
> index 80ef5d386b03..5171cb746444 100644
> --- a/arch/x86/kernel/head_32.S
> +++ b/arch/x86/kernel/head_32.S
> @@ -441,10 +441,6 @@ initial_pg_fixmap:
>  swapper_pg_dir:
>  	.fill 1024,4,0
>  	.fill PTI_USER_PGD_FILL,4,0
> -.globl empty_zero_page
> -empty_zero_page:
> -	.fill 4096,1,0
> -EXPORT_SYMBOL(empty_zero_page)
>  
>  /*
>   * This starts the data section.
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index 21816b48537c..cbf7647a25d8 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -712,10 +712,3 @@ SYM_PIC_ALIAS(phys_base);
>  EXPORT_SYMBOL(phys_base)
>  
>  #include "../xen/xen-head.S"
> -
> -	__PAGE_ALIGNED_BSS
> -SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
> -	.skip PAGE_SIZE
> -SYM_DATA_END(empty_zero_page)
> -EXPORT_SYMBOL(empty_zero_page)
> -
> diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
> index 50a136213b2b..61f07d981a94 100644
> --- a/arch/xtensa/include/asm/pgtable.h
> +++ b/arch/xtensa/include/asm/pgtable.h
> @@ -209,10 +209,6 @@
>  #define pgd_ERROR(e) \
>  	printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))
>  
> -extern unsigned long empty_zero_page[1024];
> -
> -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
> -
>  #ifdef CONFIG_MMU
>  extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
>  extern void paging_init(void);
> diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
> index 8484294bc623..4b0c5c5e685a 100644
> --- a/arch/xtensa/kernel/head.S
> +++ b/arch/xtensa/kernel/head.S
> @@ -381,6 +381,3 @@ ENTRY(swapper_pg_dir)
>  	.fill	PAGE_SIZE, 1, 0
>  END(swapper_pg_dir)
>  #endif
> -ENTRY(empty_zero_page)
> -	.fill	PAGE_SIZE, 1, 0
> -END(empty_zero_page)
> diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
> index 62d81e76e18e..ced335b4df5f 100644
> --- a/arch/xtensa/kernel/xtensa_ksyms.c
> +++ b/arch/xtensa/kernel/xtensa_ksyms.c
> @@ -15,8 +15,6 @@
>  #include <linux/module.h>
>  #include <asm/pgtable.h>
>  
> -EXPORT_SYMBOL(empty_zero_page);
> -
>  unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
>  {
>  	BUG();
> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> index 9bacf4df9769..3d48eea57cd2 100644
> --- a/include/linux/pgtable.h
> +++ b/include/linux/pgtable.h
> @@ -1890,6 +1890,9 @@ static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
>   * for different ranges in the virtual address space.
>   *
>   * zero_page_pfn identifies the first (or the only) pfn for these pages.
> + *
> + * For architectures that don't __HAVE_COLOR_ZERO_PAGE the zero page lives in
> + * empty_zero_page in BSS.
>   */
>  #ifdef __HAVE_COLOR_ZERO_PAGE
>  static inline int is_zero_pfn(unsigned long pfn)
> @@ -1916,6 +1919,13 @@ static inline unsigned long zero_pfn(unsigned long addr)
>  
>  	return zero_page_pfn;
>  }
> +
> +extern uint8_t empty_zero_page[PAGE_SIZE];
> +
> +#ifndef ZERO_PAGE
> +#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
> +#endif
> +
>  #endif /* __HAVE_COLOR_ZERO_PAGE */
>  
>  #ifdef CONFIG_MMU
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index a0ca236eb4f5..1eac634ece1a 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -56,6 +56,11 @@ EXPORT_SYMBOL(high_memory);
>  unsigned long zero_page_pfn __ro_after_init;
>  EXPORT_SYMBOL(zero_page_pfn);
>  
> +#ifndef __HAVE_COLOR_ZERO_PAGE
> +uint8_t empty_zero_page[PAGE_SIZE] __page_aligned_bss;
> +EXPORT_SYMBOL(empty_zero_page);
> +#endif
> +
>  #ifdef CONFIG_DEBUG_MEMORY_INIT
>  int __meminitdata mminit_loglevel;
>  
> -- 
> 2.51.0
> 


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v3 4/4] mm: cache struct page for empty_zero_page and return it from ZERO_PAGE()
  2026-02-11 10:31 ` [PATCH v3 4/4] mm: cache struct page for empty_zero_page and return it from ZERO_PAGE() Mike Rapoport
  2026-02-12  9:08   ` David Hildenbrand (Arm)
@ 2026-02-12 18:40   ` Liam R. Howlett
  1 sibling, 0 replies; 17+ messages in thread
From: Liam R. Howlett @ 2026-02-12 18:40 UTC (permalink / raw)
  To: Mike Rapoport
  Cc: Andrew Morton, Andreas Larsson, Borislav Petkov, Brian Cain,
	Catalin Marinas, Christophe Leroy (CS GROUP),
	David S. Miller, Dave Hansen, David Hildenbrand, Dinh Nguyen,
	Geert Uytterhoeven, Guo Ren, Helge Deller, Huacai Chen,
	Ingo Molnar, Johannes Berg, John Paul Adrian Glaubitz,
	Lorenzo Stoakes, Madhavan Srinivasan, Magnus Lindholm,
	Matt Turner, Max Filippov, Michael Ellerman, Michal Hocko,
	Michal Simek, Palmer Dabbelt, Richard Weinberger, Russell King,
	Stafford Horne, Suren Baghdasaryan, Thomas Gleixner,
	Vineet Gupta, Vlastimil Babka, Will Deacon, linux-alpha,
	linux-kernel, linux-snps-arc, linux-arm-kernel, linux-csky,
	linux-hexagon, loongarch, linux-m68k, linux-openrisc,
	linux-parisc, linuxppc-dev, linux-riscv, linux-sh, sparclinux,
	linux-um, linux-mm, x86

* Mike Rapoport <rppt@kernel.org> [260211 05:32]:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> 
> For most architectures every invocation of ZERO_PAGE() does
> virt_to_page(empty_zero_page). But empty_zero_page is in BSS and it is
> enough to get its struct page once at initialization time and then use
> it whenever a zero page should be accessed.
> 
> Add yet another __zero_page variable that will be initialized as
> virt_to_page(empty_zero_page) for most architectures in a weak
> arch_setup_zero_pages() function.
> 
> For architectures that use colored zero pages (MIPS and s390) rename their
> setup_zero_pages() to arch_setup_zero_pages() and make it global rather
> than static.
> 
> For architectures that cannot use virt_to_page() for BSS (arm64 and
> sparc64) add override of arch_setup_zero_pages().
> 
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>

Acked-by: Liam R. Howlett <Liam.Howlett@oracle.com>

> ---
>  arch/arm64/include/asm/pgtable.h    |  6 ------
>  arch/arm64/mm/init.c                |  5 +++++
>  arch/mips/mm/init.c                 | 11 +----------
>  arch/s390/mm/init.c                 |  4 +---
>  arch/sparc/include/asm/pgtable_64.h |  3 ---
>  arch/sparc/mm/init_64.c             | 17 +++++++----------
>  include/linux/pgtable.h             | 11 ++++++++---
>  mm/mm_init.c                        | 21 +++++++++++++++++----
>  8 files changed, 39 insertions(+), 39 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index 63da07398a30..2c1ec7cc8612 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -106,12 +106,6 @@ static inline void arch_leave_lazy_mmu_mode(void)
>  #define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp)	\
>  	local_flush_tlb_page_nonotify(vma, address)
>  
> -/*
> - * ZERO_PAGE is a global shared page that is always zero: used
> - * for zero-mapped memory areas etc..
> - */
> -#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))
> -
>  #define pte_ERROR(e)	\
>  	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
>  
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index 96711b8578fd..417ec7efe569 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -328,6 +328,11 @@ void __init bootmem_init(void)
>  	memblock_dump_all();
>  }
>  
> +void __init arch_setup_zero_pages(void)
> +{
> +	__zero_page = phys_to_page(__pa_symbol(empty_zero_page));
> +}
> +
>  void __init arch_mm_preinit(void)
>  {
>  	unsigned int flags = SWIOTLB_VERBOSE;
> diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
> index 4f6449ad02ca..55b25e85122a 100644
> --- a/arch/mips/mm/init.c
> +++ b/arch/mips/mm/init.c
> @@ -56,10 +56,7 @@ unsigned long empty_zero_page, zero_page_mask;
>  EXPORT_SYMBOL_GPL(empty_zero_page);
>  EXPORT_SYMBOL(zero_page_mask);
>  
> -/*
> - * Not static inline because used by IP27 special magic initialization code
> - */
> -static void __init setup_zero_pages(void)
> +void __init arch_setup_zero_pages(void)
>  {
>  	unsigned int order;
>  
> @@ -450,7 +447,6 @@ void __init arch_mm_preinit(void)
>  	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
>  
>  	maar_init();
> -	setup_zero_pages();	/* Setup zeroed pages.  */
>  	highmem_init();
>  
>  #ifdef CONFIG_64BIT
> @@ -461,11 +457,6 @@ void __init arch_mm_preinit(void)
>  				0x80000000 - 4, KCORE_TEXT);
>  #endif
>  }
> -#else  /* CONFIG_NUMA */
> -void __init arch_mm_preinit(void)
> -{
> -	setup_zero_pages();	/* This comes from node 0 */
> -}
>  #endif /* !CONFIG_NUMA */
>  
>  void free_init_pages(const char *what, unsigned long begin, unsigned long end)
> diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
> index 3c20475cbee2..1f72efc2a579 100644
> --- a/arch/s390/mm/init.c
> +++ b/arch/s390/mm/init.c
> @@ -69,7 +69,7 @@ unsigned long empty_zero_page, zero_page_mask;
>  EXPORT_SYMBOL(empty_zero_page);
>  EXPORT_SYMBOL(zero_page_mask);
>  
> -static void __init setup_zero_pages(void)
> +void __init arch_setup_zero_pages(void)
>  {
>  	unsigned long total_pages = memblock_estimated_nr_free_pages();
>  	unsigned int order;
> @@ -159,8 +159,6 @@ void __init arch_mm_preinit(void)
>  	cpumask_set_cpu(0, mm_cpumask(&init_mm));
>  
>  	pv_init();
> -
> -	setup_zero_pages();	/* Setup zeroed pages. */
>  }
>  
>  unsigned long memory_block_size_bytes(void)
> diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
> index 615f460c50af..74ede706fb32 100644
> --- a/arch/sparc/include/asm/pgtable_64.h
> +++ b/arch/sparc/include/asm/pgtable_64.h
> @@ -210,9 +210,6 @@ extern unsigned long _PAGE_CACHE;
>  extern unsigned long pg_iobits;
>  extern unsigned long _PAGE_ALL_SZ_BITS;
>  
> -extern struct page *mem_map_zero;
> -#define ZERO_PAGE(vaddr)	(mem_map_zero)
> -
>  /* PFNs are real physical page numbers.  However, mem_map only begins to record
>   * per-page information starting at pfn_base.  This is to handle systems where
>   * the first physical page in the machine is at some huge physical address,
> diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
> index 0cc8de2fea90..707c1df67d79 100644
> --- a/arch/sparc/mm/init_64.c
> +++ b/arch/sparc/mm/init_64.c
> @@ -177,9 +177,6 @@ extern unsigned long sparc_ramdisk_image64;
>  extern unsigned int sparc_ramdisk_image;
>  extern unsigned int sparc_ramdisk_size;
>  
> -struct page *mem_map_zero __read_mostly;
> -EXPORT_SYMBOL(mem_map_zero);
> -
>  unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
>  
>  unsigned long sparc64_kern_pri_context __read_mostly;
> @@ -2496,11 +2493,17 @@ static void __init register_page_bootmem_info(void)
>  			register_page_bootmem_info_node(NODE_DATA(i));
>  #endif
>  }
> -void __init mem_init(void)
> +
> +void __init arch_setup_zero_pages(void)
>  {
>  	phys_addr_t zero_page_pa = kern_base +
>  		((unsigned long)&empty_zero_page[0] - KERNBASE);
>  
> +	__zero_page = phys_to_page(zero_page_pa);
> +}
> +
> +void __init mem_init(void)
> +{
>  	/*
>  	 * Must be done after boot memory is put on freelist, because here we
>  	 * might set fields in deferred struct pages that have not yet been
> @@ -2509,12 +2512,6 @@ void __init mem_init(void)
>  	 */
>  	register_page_bootmem_info();
>  
> -	/*
> -	 * Set up the zero page, mark it reserved, so that page count
> -	 * is not manipulated when freeing the page from user ptes.
> -	 */
> -	mem_map_zero = pfn_to_page(PHYS_PFN(zero_page_pa));
> -
>  	if (tlb_type == cheetah || tlb_type == cheetah_plus)
>  		cheetah_ecache_flush_init();
>  }
> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> index 3d48eea57cd2..1da21ec62836 100644
> --- a/include/linux/pgtable.h
> +++ b/include/linux/pgtable.h
> @@ -1894,6 +1894,8 @@ static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
>   * For architectures that don't __HAVE_COLOR_ZERO_PAGE the zero page lives in
>   * empty_zero_page in BSS.
>   */
> +void arch_setup_zero_pages(void);
> +
>  #ifdef __HAVE_COLOR_ZERO_PAGE
>  static inline int is_zero_pfn(unsigned long pfn)
>  {
> @@ -1921,10 +1923,13 @@ static inline unsigned long zero_pfn(unsigned long addr)
>  }
>  
>  extern uint8_t empty_zero_page[PAGE_SIZE];
> +extern struct page *__zero_page;
>  
> -#ifndef ZERO_PAGE
> -#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
> -#endif
> +static inline struct page *_zero_page(unsigned long addr)
> +{
> +	return __zero_page;
> +}
> +#define ZERO_PAGE(vaddr) _zero_page(vaddr)
>  
>  #endif /* __HAVE_COLOR_ZERO_PAGE */
>  
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index 1eac634ece1a..b08608c1b71d 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -59,7 +59,10 @@ EXPORT_SYMBOL(zero_page_pfn);
>  #ifndef __HAVE_COLOR_ZERO_PAGE
>  uint8_t empty_zero_page[PAGE_SIZE] __page_aligned_bss;
>  EXPORT_SYMBOL(empty_zero_page);
> -#endif
> +
> +struct page *__zero_page __ro_after_init;
> +EXPORT_SYMBOL(__zero_page);
> +#endif /* __HAVE_COLOR_ZERO_PAGE */
>  
>  #ifdef CONFIG_DEBUG_MEMORY_INIT
>  int __meminitdata mminit_loglevel;
> @@ -2675,12 +2678,21 @@ static void __init mem_init_print_info(void)
>  		);
>  }
>  
> -static int __init init_zero_page_pfn(void)
> +#ifndef __HAVE_COLOR_ZERO_PAGE
> +/*
> + * architectures that __HAVE_COLOR_ZERO_PAGE must define this function
> + */
> +void __init __weak arch_setup_zero_pages(void)
> +{
> +	__zero_page = virt_to_page(empty_zero_page);
> +}
> +#endif
> +
> +static void __init init_zero_page_pfn(void)
>  {
> +	arch_setup_zero_pages();
>  	zero_page_pfn = page_to_pfn(ZERO_PAGE(0));
> -	return 0;
>  }
> -early_initcall(init_zero_page_pfn);
>  
>  void __init __weak arch_mm_preinit(void)
>  {
> @@ -2704,6 +2716,7 @@ void __init mm_core_init_early(void)
>  void __init mm_core_init(void)
>  {
>  	arch_mm_preinit();
> +	init_zero_page_pfn();
>  
>  	/* Initializations relying on SMP setup */
>  	BUILD_BUG_ON(MAX_ZONELISTS > 2);
> -- 
> 2.51.0
> 


^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2026-02-12 18:41 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-02-11 10:31 [PATCH v3 0/4] arch, mm: consolidate empty_zero_page Mike Rapoport
2026-02-11 10:31 ` [PATCH v3 1/4] mm: don't special case !MMU for is_zero_pfn() and my_zero_pfn() Mike Rapoport
2026-02-12  8:58   ` David Hildenbrand (Arm)
2026-02-12 18:30   ` Liam R. Howlett
2026-02-11 10:31 ` [PATCH v3 2/4] mm: rename my_zero_pfn() to zero_pfn() Mike Rapoport
2026-02-12  9:01   ` David Hildenbrand (Arm)
2026-02-12 15:28   ` Vlastimil Babka
2026-02-12 18:33   ` Liam R. Howlett
2026-02-11 10:31 ` [PATCH v3 3/4] arch, mm: consolidate empty_zero_page Mike Rapoport
2026-02-11 20:14   ` Magnus Lindholm
2026-02-12  5:33   ` Dinh Nguyen
2026-02-12  8:38   ` Andreas Larsson
2026-02-12  9:04   ` David Hildenbrand (Arm)
2026-02-12 18:38   ` Liam R. Howlett
2026-02-11 10:31 ` [PATCH v3 4/4] mm: cache struct page for empty_zero_page and return it from ZERO_PAGE() Mike Rapoport
2026-02-12  9:08   ` David Hildenbrand (Arm)
2026-02-12 18:40   ` Liam R. Howlett

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox