From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	William Kucharski <william.kucharski@oracle.com>
Subject: [PATCH v2 14/16] mm/mempolicy: Add alloc_frozen_pages()
Date: Tue,  9 Aug 2022 18:18:52 +0100	[thread overview]
Message-ID: <20220809171854.3725722-15-willy@infradead.org> (raw)
In-Reply-To: <20220809171854.3725722-1-willy@infradead.org>

Provide an interface to allocate pages from the page allocator without
incrementing their refcount.  This saves an atomic operation on free,
which may be beneficial to some users (e.g. slab).
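
For illustration, a minimal sketch of how an mm-internal caller might
pair the new calls (the helper names here are hypothetical; the real
slab conversion follows in patches 15-16).  A frozen page comes back
with a refcount of zero, so the owner must release it with
free_frozen_pages() rather than put_page():

	/* Hypothetical mm-internal user; interfaces are in mm/internal.h. */
	static struct page *grab_frozen_buffer(gfp_t gfp, unsigned int order)
	{
		/* Allocated with refcount zero; no atomic op taken here. */
		return alloc_frozen_pages(gfp, order);
	}

	static void drop_frozen_buffer(struct page *page, unsigned int order)
	{
		/*
		 * Skips the atomic refcount decrement that put_page()
		 * would perform on an ordinary allocation.
		 */
		free_frozen_pages(page, order);
	}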

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
---
 mm/internal.h  |  9 ++++++++
 mm/mempolicy.c | 61 +++++++++++++++++++++++++++++++-------------------
 2 files changed, 47 insertions(+), 23 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 7e6079216a17..6f02bc32b406 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -367,6 +367,15 @@ struct page *__alloc_frozen_pages(gfp_t, unsigned int order, int nid,
 void free_frozen_pages(struct page *, unsigned int order);
 void free_unref_page_list(struct list_head *list);
 
+#ifdef CONFIG_NUMA
+struct page *alloc_frozen_pages(gfp_t, unsigned int order);
+#else
+static inline struct page *alloc_frozen_pages(gfp_t gfp, unsigned int order)
+{
+	return __alloc_frozen_pages(gfp, order, numa_node_id(), NULL);
+}
+#endif
+
 extern void zone_pcp_update(struct zone *zone, int cpu_online);
 extern void zone_pcp_reset(struct zone *zone);
 extern void zone_pcp_disable(struct zone *zone);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b73d3248d976..09ecc499d5fc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2100,7 +2100,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 {
 	struct page *page;
 
-	page = __alloc_pages(gfp, order, nid, NULL);
+	page = __alloc_frozen_pages(gfp, order, nid, NULL);
 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
 	if (!static_branch_likely(&vm_numa_stat_key))
 		return page;
@@ -2126,9 +2126,9 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
 	 */
 	preferred_gfp = gfp | __GFP_NOWARN;
 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
-	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
+	page = __alloc_frozen_pages(preferred_gfp, order, nid, &pol->nodes);
 	if (!page)
-		page = __alloc_pages(gfp, order, nid, NULL);
+		page = __alloc_frozen_pages(gfp, order, nid, NULL);
 
 	return page;
 }
@@ -2167,8 +2167,11 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		mpol_cond_put(pol);
 		gfp |= __GFP_COMP;
 		page = alloc_page_interleave(gfp, order, nid);
-		if (page && order > 1)
-			prep_transhuge_page(page);
+		if (page) {
+			set_page_refcounted(page);
+			if (order > 1)
+				prep_transhuge_page(page);
+		}
 		folio = (struct folio *)page;
 		goto out;
 	}
@@ -2180,8 +2183,11 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		gfp |= __GFP_COMP;
 		page = alloc_pages_preferred_many(gfp, order, node, pol);
 		mpol_cond_put(pol);
-		if (page && order > 1)
-			prep_transhuge_page(page);
+		if (page) {
+			set_page_refcounted(page);
+			if (order > 1)
+				prep_transhuge_page(page);
+		}
 		folio = (struct folio *)page;
 		goto out;
 	}
@@ -2235,21 +2241,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL(vma_alloc_folio);
 
-/**
- * alloc_pages - Allocate pages.
- * @gfp: GFP flags.
- * @order: Power of two of number of pages to allocate.
- *
- * Allocate 1 << @order contiguous pages.  The physical address of the
- * first page is naturally aligned (eg an order-3 allocation will be aligned
- * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
- * process is honoured when in process context.
- *
- * Context: Can be called from any context, providing the appropriate GFP
- * flags are used.
- * Return: The page on success or NULL if allocation fails.
- */
-struct page *alloc_pages(gfp_t gfp, unsigned order)
+struct page *alloc_frozen_pages(gfp_t gfp, unsigned order)
 {
 	struct mempolicy *pol = &default_policy;
 	struct page *page;
@@ -2267,12 +2259,35 @@ struct page *alloc_pages(gfp_t gfp, unsigned order)
 		page = alloc_pages_preferred_many(gfp, order,
 				  policy_node(gfp, pol, numa_node_id()), pol);
 	else
-		page = __alloc_pages(gfp, order,
+		page = __alloc_frozen_pages(gfp, order,
 				policy_node(gfp, pol, numa_node_id()),
 				policy_nodemask(gfp, pol));
 
 	return page;
 }
+
+/**
+ * alloc_pages - Allocate pages.
+ * @gfp: GFP flags.
+ * @order: Power of two of number of pages to allocate.
+ *
+ * Allocate 1 << @order contiguous pages.  The physical address of the
+ * first page is naturally aligned (eg an order-3 allocation will be aligned
+ * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
+ * process is honoured when in process context.
+ *
+ * Context: Can be called from any context, providing the appropriate GFP
+ * flags are used.
+ * Return: The page on success or NULL if allocation fails.
+ */
+struct page *alloc_pages(gfp_t gfp, unsigned order)
+{
+	struct page *page = alloc_frozen_pages(gfp, order);
+
+	if (page)
+		set_page_refcounted(page);
+	return page;
+}
 EXPORT_SYMBOL(alloc_pages);
 
 struct folio *folio_alloc(gfp_t gfp, unsigned order)
-- 
2.35.1



