From: Ryan Roberts <ryan.roberts@arm.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	"Matthew Wilcox (Oracle)" <willy@infradead.org>,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
	Yin Fengwei <fengwei.yin@intel.com>,
	David Hildenbrand <david@redhat.com>, Yu Zhao <yuzhao@google.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>,
	Geert Uytterhoeven <geert@linux-m68k.org>,
	Christian Borntraeger <borntraeger@linux.ibm.com>,
	Sven Schnelle <svens@linux.ibm.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	"H. Peter Anvin" <hpa@zytor.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	linux-alpha@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, linux-ia64@vger.kernel.org,
	linux-m68k@lists.linux-m68k.org, linux-s390@vger.kernel.org
Subject: [PATCH v1 02/10] mm: pass gfp flags and order to vma_alloc_zeroed_movable_folio()
Date: Mon, 26 Jun 2023 18:14:22 +0100
Message-ID: <20230626171430.3167004-3-ryan.roberts@arm.com>
In-Reply-To: <20230626171430.3167004-1-ryan.roberts@arm.com>

Allow allocation of large folios with vma_alloc_zeroed_movable_folio().
This prepares the ground for large anonymous folios. The generic
implementation of vma_alloc_zeroed_movable_folio() now uses
clear_huge_page() to zero the allocated folio, since it may now be of
non-zero order.
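
In essence, the generic implementation becomes (a minimal sketch
mirroring the include/linux/highmem.h hunk below):

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE | gfp, order,
				vma, vaddr, false);
	if (folio)
		clear_huge_page(&folio->page, vaddr, 1U << order);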

Currently the function is always called with order 0 and no extra gfp
flags, so no functional change intended. But a subsequent commit will
take advantage of the new parameters to allocate large folios. The extra
gfp flags will be used to control the reclaim policy.
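
For illustration, a caller could then ask for a larger folio and fall
back to a single page on failure; the order-2 request and the
__GFP_NORETRY | __GFP_NOWARN flags here are hypothetical choices, not
part of this patch:

	/* Try an order-2 (4-page) zeroed folio first; fall back to order 0. */
	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address,
					       __GFP_NORETRY | __GFP_NOWARN, 2);
	if (!folio)
		folio = vma_alloc_zeroed_movable_folio(vma, vmf->address, 0, 0);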

Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 arch/alpha/include/asm/page.h   |  5 +++--
 arch/arm64/include/asm/page.h   |  3 ++-
 arch/arm64/mm/fault.c           |  7 ++++---
 arch/ia64/include/asm/page.h    |  5 +++--
 arch/m68k/include/asm/page_no.h |  7 ++++---
 arch/s390/include/asm/page.h    |  5 +++--
 arch/x86/include/asm/page.h     |  5 +++--
 include/linux/highmem.h         | 23 +++++++++++++----------
 mm/memory.c                     |  5 +++--
 9 files changed, 38 insertions(+), 27 deletions(-)

diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h
index 4db1ebc0ed99..6fc7fe91b6cb 100644
--- a/arch/alpha/include/asm/page.h
+++ b/arch/alpha/include/asm/page.h
@@ -17,8 +17,9 @@
 extern void clear_page(void *page);
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 
-#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
-	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
+#define vma_alloc_zeroed_movable_folio(vma, vaddr, gfp, order) \
+	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO | (gfp), \
+			order, vma, vaddr, false)
 
 extern void copy_page(void * _to, void * _from);
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 2312e6ee595f..47710852f872 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -30,7 +30,8 @@ void copy_highpage(struct page *to, struct page *from);
 #define __HAVE_ARCH_COPY_HIGHPAGE
 
 struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
-						unsigned long vaddr);
+						unsigned long vaddr,
+						gfp_t gfp, int order);
 #define vma_alloc_zeroed_movable_folio vma_alloc_zeroed_movable_folio
 
 void tag_clear_highpage(struct page *to);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 6045a5117ac1..0a43c3b3f190 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -961,9 +961,10 @@ NOKPROBE_SYMBOL(do_debug_exception);
  * Used during anonymous page fault handling.
  */
 struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
-						unsigned long vaddr)
+						unsigned long vaddr,
+						gfp_t gfp, int order)
 {
-	gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
+	gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO | gfp;
 
 	/*
 	 * If the page is mapped with PROT_MTE, initialise the tags at the
@@ -973,7 +974,7 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
 	if (vma->vm_flags & VM_MTE)
 		flags |= __GFP_ZEROTAGS;
 
-	return vma_alloc_folio(flags, 0, vma, vaddr, false);
+	return vma_alloc_folio(flags, order, vma, vaddr, false);
 }
 
 void tag_clear_highpage(struct page *page)
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index 310b09c3342d..ebdf04274023 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -82,10 +82,11 @@ do {						\
 } while (0)
 
 
-#define vma_alloc_zeroed_movable_folio(vma, vaddr)			\
+#define vma_alloc_zeroed_movable_folio(vma, vaddr, gfp, order)		\
 ({									\
 	struct folio *folio = vma_alloc_folio(				\
-		GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false); \
+		GFP_HIGHUSER_MOVABLE | __GFP_ZERO | (gfp),		\
+		order, vma, vaddr, false);				\
 	if (folio)							\
 		flush_dcache_folio(folio);				\
 	folio;								\
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index 060e4c0e7605..4a2fe57fef5e 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -3,7 +3,7 @@
 #define _M68K_PAGE_NO_H
 
 #ifndef __ASSEMBLY__
- 
+
 extern unsigned long memory_start;
 extern unsigned long memory_end;
 
@@ -13,8 +13,9 @@ extern unsigned long memory_end;
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
-	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
+#define vma_alloc_zeroed_movable_folio(vma, vaddr, gfp, order) \
+	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO | (gfp), \
+			order, vma, vaddr, false)
 
 #define __pa(vaddr)		((unsigned long)(vaddr))
 #define __va(paddr)		((void *)((unsigned long)(paddr)))
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 8a2a3b5d1e29..b749564140f1 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -73,8 +73,9 @@ static inline void copy_page(void *to, void *from)
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
-	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
+#define vma_alloc_zeroed_movable_folio(vma, vaddr, gfp, order) \
+	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO | (gfp), \
+			order, vma, vaddr, false)
 
 /*
  * These are used to make use of C type-checking..
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index d18e5c332cb9..34deab1a8dae 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -34,8 +34,9 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 	copy_page(to, from);
 }
 
-#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
-	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
+#define vma_alloc_zeroed_movable_folio(vma, vaddr, gfp, order) \
+	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO | (gfp), \
+			order, vma, vaddr, false)
 
 #ifndef __pa
 #define __pa(x)		__phys_addr((unsigned long)(x))
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 4de1dbcd3ef6..b9a9b0340557 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -209,26 +209,29 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
 
 #ifndef vma_alloc_zeroed_movable_folio
 /**
- * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
- * @vma: The VMA the page is to be allocated for.
- * @vaddr: The virtual address the page will be inserted into.
- *
- * This function will allocate a page suitable for inserting into this
- * VMA at this virtual address.  It may be allocated from highmem or
+ * vma_alloc_zeroed_movable_folio - Allocate a zeroed folio for a VMA.
+ * @vma: The start VMA the folio is to be allocated for.
+ * @vaddr: The virtual address the folio will be inserted into.
+ * @gfp: Additional gfp flags to mix in, or 0.
+ * @order: The order of the folio (2^order pages).
+ *
+ * This function will allocate a folio suitable for inserting into this
+ * VMA starting at this virtual address.  It may be allocated from highmem or
  * the movable zone.  An architecture may provide its own implementation.
  *
- * Return: A folio containing one allocated and zeroed page or NULL if
+ * Return: A folio containing 2^order allocated and zeroed pages or NULL if
  * we are out of memory.
  */
 static inline
 struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
-				   unsigned long vaddr)
+				   unsigned long vaddr, gfp_t gfp, int order)
 {
 	struct folio *folio;
 
-	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
+	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE | gfp,
+					order, vma, vaddr, false);
 	if (folio)
-		clear_user_highpage(&folio->page, vaddr);
+		clear_huge_page(&folio->page, vaddr, 1U << order);
 
 	return folio;
 }
diff --git a/mm/memory.c b/mm/memory.c
index 3d4ea668c4d1..367bbbb29d91 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3073,7 +3073,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		goto oom;
 
 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
-		new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+		new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address,
+									0, 0);
 		if (!new_folio)
 			goto oom;
 	} else {
@@ -4087,7 +4088,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	/* Allocate our own private page. */
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
-	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address, 0, 0);
 	if (!folio)
 		goto oom;
 
-- 
2.25.1


