From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH 4/6] mm/page_alloc: Add alloc_frozen_pages()
Date: Tue, 31 May 2022 16:06:09 +0100
Message-ID: <20220531150611.1303156-5-willy@infradead.org>
In-Reply-To: <20220531150611.1303156-1-willy@infradead.org>
Provide an interface to allocate pages from the page allocator without
incrementing their refcount. This saves an atomic operation on free,
which may be beneficial to some users (e.g. slab).
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
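Illustrative usage sketch (not part of this patch): a caller that manages
page lifetimes itself, such as slab, would be expected to pair the two
interfaces roughly as below.  The function names are hypothetical;
alloc_frozen_pages() and free_frozen_pages() are mm-internal, declared in
mm/internal.h.

/* Hypothetical caller; illustration only, not in this series. */
static struct page *example_alloc_backing(gfp_t gfp, unsigned int order)
{
	/* The returned page has a refcount of zero ("frozen"). */
	return alloc_frozen_pages(gfp, order);
}

static void example_free_backing(struct page *page, unsigned int order)
{
	/* No put_page(); freeing skips the atomic refcount decrement. */
	free_frozen_pages(page, order);
}

The page's refcount stays at zero for its entire lifetime, so allocation
skips set_page_refcounted() and the free path avoids the atomic decrement
that __free_pages() would otherwise perform.
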
mm/internal.h | 11 +++++++++
mm/mempolicy.c | 61 ++++++++++++++++++++++++++++++-------------------
mm/page_alloc.c | 18 +++++++++++----
3 files changed, 63 insertions(+), 27 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index f1c0dab2b98e..bf70ee2e38e9 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -362,9 +362,20 @@ extern void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags);
extern int user_min_free_kbytes;
+struct page *__alloc_frozen_pages(gfp_t, unsigned int order, int nid,
+ nodemask_t *);
void free_frozen_pages(struct page *, unsigned int order);
void free_unref_page_list(struct list_head *list);
+#ifdef CONFIG_NUMA
+struct page *alloc_frozen_pages(gfp_t, unsigned int order);
+#else
+static inline struct page *alloc_frozen_pages(gfp_t gfp, unsigned int order)
+{
+ return __alloc_frozen_pages(gfp, order, numa_node_id(), NULL);
+}
+#endif
+
extern void zone_pcp_update(struct zone *zone, int cpu_online);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d39b01fd52fe..ac7c45d0f7dc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2102,7 +2102,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
{
struct page *page;
- page = __alloc_pages(gfp, order, nid, NULL);
+ page = __alloc_frozen_pages(gfp, order, nid, NULL);
/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
if (!static_branch_likely(&vm_numa_stat_key))
return page;
@@ -2128,9 +2128,9 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
*/
preferred_gfp = gfp | __GFP_NOWARN;
preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
- page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
+ page = __alloc_frozen_pages(preferred_gfp, order, nid, &pol->nodes);
if (!page)
- page = __alloc_pages(gfp, order, nid, NULL);
+ page = __alloc_frozen_pages(gfp, order, nid, NULL);
return page;
}
@@ -2169,8 +2169,11 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
mpol_cond_put(pol);
gfp |= __GFP_COMP;
page = alloc_page_interleave(gfp, order, nid);
- if (page && order > 1)
- prep_transhuge_page(page);
+ if (page) {
+ set_page_refcounted(page);
+ if (order > 1)
+ prep_transhuge_page(page);
+ }
folio = (struct folio *)page;
goto out;
}
@@ -2182,8 +2185,11 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
gfp |= __GFP_COMP;
page = alloc_pages_preferred_many(gfp, order, node, pol);
mpol_cond_put(pol);
- if (page && order > 1)
- prep_transhuge_page(page);
+ if (page) {
+ set_page_refcounted(page);
+ if (order > 1)
+ prep_transhuge_page(page);
+ }
folio = (struct folio *)page;
goto out;
}
@@ -2237,21 +2243,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
}
EXPORT_SYMBOL(vma_alloc_folio);
-/**
- * alloc_pages - Allocate pages.
- * @gfp: GFP flags.
- * @order: Power of two of number of pages to allocate.
- *
- * Allocate 1 << @order contiguous pages. The physical address of the
- * first page is naturally aligned (eg an order-3 allocation will be aligned
- * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
- * process is honoured when in process context.
- *
- * Context: Can be called from any context, providing the appropriate GFP
- * flags are used.
- * Return: The page on success or NULL if allocation fails.
- */
-struct page *alloc_pages(gfp_t gfp, unsigned order)
+struct page *alloc_frozen_pages(gfp_t gfp, unsigned order)
{
struct mempolicy *pol = &default_policy;
struct page *page;
@@ -2269,12 +2261,35 @@ struct page *alloc_pages(gfp_t gfp, unsigned order)
page = alloc_pages_preferred_many(gfp, order,
policy_node(gfp, pol, numa_node_id()), pol);
else
- page = __alloc_pages(gfp, order,
+ page = __alloc_frozen_pages(gfp, order,
policy_node(gfp, pol, numa_node_id()),
policy_nodemask(gfp, pol));
return page;
}
+
+/**
+ * alloc_pages - Allocate pages.
+ * @gfp: GFP flags.
+ * @order: Power of two of number of pages to allocate.
+ *
+ * Allocate 1 << @order contiguous pages. The physical address of the
+ * first page is naturally aligned (eg an order-3 allocation will be aligned
+ * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
+ * process is honoured when in process context.
+ *
+ * Context: Can be called from any context, providing the appropriate GFP
+ * flags are used.
+ * Return: The page on success or NULL if allocation fails.
+ */
+struct page *alloc_pages(gfp_t gfp, unsigned order)
+{
+ struct page *page = alloc_frozen_pages(gfp, order);
+
+ if (page)
+ set_page_refcounted(page);
+ return page;
+}
EXPORT_SYMBOL(alloc_pages);
struct folio *folio_alloc(gfp_t gfp, unsigned order)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 825922000781..49d8f04d14ef 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2390,7 +2390,6 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
set_page_private(page, 0);
- set_page_refcounted(page);
arch_alloc_page(page, order);
debug_pagealloc_map_pages(page, 1 << order);
@@ -5386,8 +5385,8 @@ EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
/*
* This is the 'heart' of the zoned buddy allocator.
*/
-struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
- nodemask_t *nodemask)
+struct page *__alloc_frozen_pages(gfp_t gfp, unsigned int order,
+ int preferred_nid, nodemask_t *nodemask)
{
struct page *page;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -5440,7 +5439,7 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
out:
if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
- __free_pages(page, order);
+ free_frozen_pages(page, order);
page = NULL;
}
@@ -5448,6 +5447,17 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
return page;
}
+
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+ nodemask_t *nodemask)
+{
+ struct page *page;
+
+ page = __alloc_frozen_pages(gfp, order, preferred_nid, nodemask);
+ if (page)
+ set_page_refcounted(page);
+ return page;
+}
EXPORT_SYMBOL(__alloc_pages);
struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
--
2.34.1