From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: akpm@linux-foundation.org, linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH v2 04/26] mm: Remove alloc_pages_vma()
Date: Wed, 4 May 2022 19:28:35 +0100
Message-ID: <20220504182857.4013401-5-willy@infradead.org>
In-Reply-To: <20220504182857.4013401-1-willy@infradead.org>

All callers have now been converted to use vma_alloc_folio(), so
convert the body of alloc_pages_vma() to allocate folios instead.
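
As an illustrative sketch (not a literal hunk from this series), a caller
that previously did:

	page = alloc_pages_vma(gfp, order, vma, addr, true);

now asks for a folio directly and, where a struct page is still needed,
takes it from the folio:

	folio = vma_alloc_folio(gfp, order, vma, addr, true);
	if (folio)
		page = &folio->page;

The new static inline alloc_page_vma() still returns NULL on failure:
page is the first member of struct folio, so &folio->page is NULL
whenever folio is.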
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/gfp.h | 18 +++++++---------
 mm/mempolicy.c      | 51 ++++++++++++++++++++++-----------------------
 2 files changed, 32 insertions(+), 37 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 3e3d36fc2109..2a08a3c4ba95 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -613,13 +613,8 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
#ifdef CONFIG_NUMA
struct page *alloc_pages(gfp_t gfp, unsigned int order);
struct folio *folio_alloc(gfp_t gfp, unsigned order);
-struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
- struct vm_area_struct *vma, unsigned long addr,
- bool hugepage);
struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
unsigned long addr, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages_vma(gfp_mask, order, vma, addr, true)
#else
static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
{
@@ -629,16 +624,17 @@ static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
{
return __folio_alloc_node(gfp, order, numa_node_id());
}
-#define alloc_pages_vma(gfp_mask, order, vma, addr, hugepage) \
- alloc_pages(gfp_mask, order)
#define vma_alloc_folio(gfp, order, vma, addr, hugepage) \
folio_alloc(gfp, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, false)
+static inline struct page *alloc_page_vma(gfp_t gfp,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr, false);
+
+ return &folio->page;
+}
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8c74107a2b15..174efbee1cb5 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2135,44 +2135,55 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
}
/**
- * alloc_pages_vma - Allocate a page for a VMA.
+ * vma_alloc_folio - Allocate a folio for a VMA.
* @gfp: GFP flags.
- * @order: Order of the GFP allocation.
+ * @order: Order of the folio.
* @vma: Pointer to VMA or NULL if not available.
* @addr: Virtual address of the allocation. Must be inside @vma.
* @hugepage: For hugepages try only the preferred node if possible.
*
- * Allocate a page for a specific address in @vma, using the appropriate
+ * Allocate a folio for a specific address in @vma, using the appropriate
* NUMA policy. When @vma is not NULL the caller must hold the mmap_lock
* of the mm_struct of the VMA to prevent it from going away. Should be
- * used for all allocations for pages that will be mapped into user space.
+ * used for all allocations for folios that will be mapped into user space.
*
- * Return: The page on success or NULL if allocation fails.
+ * Return: The folio on success or NULL if allocation fails.
*/
-struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
unsigned long addr, bool hugepage)
{
struct mempolicy *pol;
int node = numa_node_id();
- struct page *page;
+ struct folio *folio;
int preferred_nid;
nodemask_t *nmask;
pol = get_vma_policy(vma, addr);
if (pol->mode == MPOL_INTERLEAVE) {
+ struct page *page;
unsigned nid;
nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
mpol_cond_put(pol);
+ gfp |= __GFP_COMP;
page = alloc_page_interleave(gfp, order, nid);
+ if (page && order > 1)
+ prep_transhuge_page(page);
+ folio = (struct folio *)page;
goto out;
}
if (pol->mode == MPOL_PREFERRED_MANY) {
+ struct page *page;
+
node = policy_node(gfp, pol, node);
+ gfp |= __GFP_COMP;
page = alloc_pages_preferred_many(gfp, order, node, pol);
mpol_cond_put(pol);
+ if (page && order > 1)
+ prep_transhuge_page(page);
+ folio = (struct folio *)page;
goto out;
}
@@ -2199,8 +2210,8 @@ struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
* First, try to allocate THP only on local node, but
* don't reclaim unnecessarily, just compact.
*/
- page = __alloc_pages_node(hpage_node,
- gfp | __GFP_THISNODE | __GFP_NORETRY, order);
+ folio = __folio_alloc_node(gfp | __GFP_THISNODE |
+ __GFP_NORETRY, order, hpage_node);
/*
* If hugepage allocations are configured to always
@@ -2208,8 +2219,9 @@ struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
* to prefer hugepage backing, retry allowing remote
* memory with both reclaim and compact as well.
*/
- if (!page && (gfp & __GFP_DIRECT_RECLAIM))
- page = __alloc_pages(gfp, order, hpage_node, nmask);
+ if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
+ folio = __folio_alloc(gfp, order, hpage_node,
+ nmask);
goto out;
}
@@ -2217,25 +2229,12 @@ struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
nmask = policy_nodemask(gfp, pol);
preferred_nid = policy_node(gfp, pol, node);
- page = __alloc_pages(gfp, order, preferred_nid, nmask);
+ folio = __folio_alloc(gfp, order, preferred_nid, nmask);
mpol_cond_put(pol);
out:
- return page;
-}
-EXPORT_SYMBOL(alloc_pages_vma);
-
-struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
- unsigned long addr, bool hugepage)
-{
- struct folio *folio;
-
- folio = (struct folio *)alloc_pages_vma(gfp, order, vma, addr,
- hugepage);
- if (folio && order > 1)
- prep_transhuge_page(&folio->page);
-
return folio;
}
+EXPORT_SYMBOL(vma_alloc_folio);
/**
* alloc_pages - Allocate pages.
--
2.34.1