* [PATCH 0/2] mm: alloc_pages_bulk: small API refactor
@ 2024-12-17 16:31 Luiz Capitulino
  2024-12-17 16:31 ` [PATCH 1/2] mm: alloc_pages_bulk_noprof: drop page_list argument Luiz Capitulino
  2024-12-17 16:31 ` [PATCH 2/2] mm: alloc_pages_bulk: rename API Luiz Capitulino
From: Luiz Capitulino @ 2024-12-17 16:31 UTC
  To: linux-mm, mgorman, willy; +Cc: david, linux-kernel, lcapitulino

Hi,

Today, alloc_pages_bulk_noprof() takes two alternative arguments for returning
the allocated pages: a linked list and an array. There are also higher-level
APIs for both.

However, the linked list variant has apparently never had any users. So, this
series removes the page_list argument along with the alloc_pages_bulk_list()
API and refactors the remaining API naming for consistency.

I've boot-tested this series on arm64 and build-tested it on x86.

PS: Matthew, it was easier to keep my patch than to fix up Mel's, but I
included the API refactoring patch as well.

PPS: It would probably be good to also have a free_pages_bulk() function, but
I'll leave that for another day.
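
For illustration, here's a minimal sketch of how a caller of the array-based
API looks once both patches are applied (the function below is made up for
this example):

  static int example_bulk_alloc(void)
  {
  	struct page *pages[16] = { NULL };	/* only NULL slots get filled */
  	unsigned long nr;

  	nr = alloc_pages_bulk(GFP_KERNEL, ARRAY_SIZE(pages), pages);
  	if (nr < ARRAY_SIZE(pages)) {
  		/* partial allocation is possible, free what we did get */
  		while (nr--)
  			__free_page(pages[nr]);
  		return -ENOMEM;
  	}

  	/* ... use pages[] ... */
  	return 0;
  }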

Luiz Capitulino (2):
  mm: alloc_pages_bulk_noprof: drop page_list argument
  mm: alloc_pages_bulk: rename API

 .../staging/media/atomisp/pci/hmm/hmm_bo.c    | 12 +++---
 drivers/vfio/pci/mlx5/cmd.c                   | 14 +++----
 drivers/vfio/pci/virtio/migrate.c             |  6 +--
 fs/btrfs/extent_io.c                          |  2 +-
 fs/erofs/zutil.c                              |  4 +-
 fs/splice.c                                   |  2 +-
 fs/xfs/xfs_buf.c                              |  4 +-
 include/linux/gfp.h                           | 22 +++++------
 kernel/bpf/arena.c                            |  2 +-
 lib/alloc_tag.c                               |  4 +-
 lib/kunit_iov_iter.c                          |  2 +-
 lib/test_vmalloc.c                            |  2 +-
 mm/mempolicy.c                                | 28 ++++++-------
 mm/page_alloc.c                               | 39 ++++++-------------
 mm/vmalloc.c                                  |  4 +-
 net/core/page_pool.c                          |  7 ++--
 net/sunrpc/svc.c                              |  4 +-
 net/sunrpc/svc_xprt.c                         |  3 +-
 18 files changed, 70 insertions(+), 91 deletions(-)

-- 
2.47.1




* [PATCH 1/2] mm: alloc_pages_bulk_noprof: drop page_list argument
  2024-12-17 16:31 [PATCH 0/2] mm: alloc_pages_bulk: small API refactor Luiz Capitulino
@ 2024-12-17 16:31 ` Luiz Capitulino
  2024-12-19 13:24   ` David Hildenbrand
  2024-12-17 16:31 ` [PATCH 2/2] mm: alloc_pages_bulk: rename API Luiz Capitulino
From: Luiz Capitulino @ 2024-12-17 16:31 UTC
  To: linux-mm, mgorman, willy; +Cc: david, linux-kernel, lcapitulino

The commit 387ba26fb1cb added __alloc_pages_bulk() along with the page_list
argument. The next commit 0f87d9d30f21 added the array-based argument. As
it turns out, the page_list argument has no users in the current tree (if it
ever had any). Dropping it allows for a slight simplification and eliminates
some unnecessary checks, now that page_array is required.
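
For reference, this is roughly what a caller of the list variant would have
looked like (a sketch only; as noted above, no such caller exists in-tree):

  static void example_list_caller(void)
  {
  	LIST_HEAD(page_list);
  	struct page *page, *tmp;

  	if (!alloc_pages_bulk_list(GFP_KERNEL, 16, &page_list))
  		return;	/* nothing was allocated */

  	/* pages come back chained through page->lru */
  	list_for_each_entry_safe(page, tmp, &page_list, lru) {
  		list_del(&page->lru);
  		__free_page(page);
  	}
  }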

Signed-off-by: Luiz Capitulino <luizcap@redhat.com>
---
 include/linux/gfp.h |  8 ++------
 mm/mempolicy.c      | 14 +++++++-------
 mm/page_alloc.c     | 39 ++++++++++++---------------------------
 3 files changed, 21 insertions(+), 40 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b0fe9f62d15b6..eebed36443b35 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -212,7 +212,6 @@ struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_
 
 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 				nodemask_t *nodemask, int nr_pages,
-				struct list_head *page_list,
 				struct page **page_array);
 #define __alloc_pages_bulk(...)			alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
 
@@ -223,11 +222,8 @@ unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
 	alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__))
 
 /* Bulk allocate order-0 pages */
-#define alloc_pages_bulk_list(_gfp, _nr_pages, _list)			\
-	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL)
-
 #define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array)		\
-	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array)
+	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)
 
 static inline unsigned long
 alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
@@ -236,7 +232,7 @@ alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 
-	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, NULL, page_array);
+	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
 }
 
 #define alloc_pages_bulk_array_node(...)				\
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 04f35659717ae..42a7b07ccc15a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2375,13 +2375,13 @@ static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
 		if (delta) {
 			nr_allocated = alloc_pages_bulk_noprof(gfp,
 					interleave_nodes(pol), NULL,
-					nr_pages_per_node + 1, NULL,
+					nr_pages_per_node + 1,
 					page_array);
 			delta--;
 		} else {
 			nr_allocated = alloc_pages_bulk_noprof(gfp,
 					interleave_nodes(pol), NULL,
-					nr_pages_per_node, NULL, page_array);
+					nr_pages_per_node, page_array);
 		}
 
 		page_array += nr_allocated;
@@ -2430,7 +2430,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
 	if (weight && node_isset(node, nodes)) {
 		node_pages = min(rem_pages, weight);
 		nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
-						  NULL, page_array);
+						  page_array);
 		page_array += nr_allocated;
 		total_allocated += nr_allocated;
 		/* if that's all the pages, no need to interleave */
@@ -2493,7 +2493,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
 		if (!node_pages)
 			break;
 		nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
-						  NULL, page_array);
+						  page_array);
 		page_array += nr_allocated;
 		total_allocated += nr_allocated;
 		if (total_allocated == nr_pages)
@@ -2517,11 +2517,11 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
 
 	nr_allocated  = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes,
-					   nr_pages, NULL, page_array);
+					   nr_pages, page_array);
 
 	if (nr_allocated < nr_pages)
 		nr_allocated += alloc_pages_bulk_noprof(gfp, numa_node_id(), NULL,
-				nr_pages - nr_allocated, NULL,
+				nr_pages - nr_allocated,
 				page_array + nr_allocated);
 	return nr_allocated;
 }
@@ -2557,7 +2557,7 @@ unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
 	nid = numa_node_id();
 	nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
 	return alloc_pages_bulk_noprof(gfp, nid, nodemask,
-				       nr_pages, NULL, page_array);
+				       nr_pages, page_array);
 }
 
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1cb4b8c8886d8..3ef6d902e2fea 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4529,28 +4529,23 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 }
 
 /*
- * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
+ * __alloc_pages_bulk - Allocate a number of order-0 pages to an array
  * @gfp: GFP flags for the allocation
  * @preferred_nid: The preferred NUMA node ID to allocate from
  * @nodemask: Set of nodes to allocate from, may be NULL
- * @nr_pages: The number of pages desired on the list or array
- * @page_list: Optional list to store the allocated pages
- * @page_array: Optional array to store the pages
+ * @nr_pages: The number of pages desired in the array
+ * @page_array: Array to store the pages
  *
  * This is a batched version of the page allocator that attempts to
- * allocate nr_pages quickly. Pages are added to page_list if page_list
- * is not NULL, otherwise it is assumed that the page_array is valid.
+ * allocate nr_pages quickly. Pages are added to the page_array.
  *
- * For lists, nr_pages is the number of pages that should be allocated.
- *
- * For arrays, only NULL elements are populated with pages and nr_pages
+ * Note that only NULL elements are populated with pages and nr_pages
  * is the maximum number of pages that will be stored in the array.
  *
- * Returns the number of pages on the list or array.
+ * Returns the number of pages in the array.
  */
 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 			nodemask_t *nodemask, int nr_pages,
-			struct list_head *page_list,
 			struct page **page_array)
 {
 	struct page *page;
@@ -4568,7 +4563,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 	 * Skip populated array elements to determine if any pages need
 	 * to be allocated before disabling IRQs.
 	 */
-	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
+	while (nr_populated < nr_pages && page_array[nr_populated])
 		nr_populated++;
 
 	/* No pages requested? */
@@ -4576,7 +4571,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 		goto out;
 
 	/* Already populated array? */
-	if (unlikely(page_array && nr_pages - nr_populated == 0))
+	if (unlikely(nr_pages - nr_populated == 0))
 		goto out;
 
 	/* Bulk allocator does not support memcg accounting. */
@@ -4658,7 +4653,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 	while (nr_populated < nr_pages) {
 
 		/* Skip existing pages */
-		if (page_array && page_array[nr_populated]) {
+		if (page_array[nr_populated]) {
 			nr_populated++;
 			continue;
 		}
@@ -4676,11 +4671,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 		nr_account++;
 
 		prep_new_page(page, 0, gfp, 0);
-		if (page_list)
-			list_add(&page->lru, page_list);
-		else
-			page_array[nr_populated] = page;
-		nr_populated++;
+		page_array[nr_populated++] = page;
 	}
 
 	pcp_spin_unlock(pcp);
@@ -4697,14 +4688,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 
 failed:
 	page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask);
-	if (page) {
-		if (page_list)
-			list_add(&page->lru, page_list);
-		else
-			page_array[nr_populated] = page;
-		nr_populated++;
-	}
-
+	if (page)
+		page_array[nr_populated++] = page;
 	goto out;
 }
 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
-- 
2.47.1




* [PATCH 2/2] mm: alloc_pages_bulk: rename API
  2024-12-17 16:31 [PATCH 0/2] mm: alloc_pages_bulk: small API refactor Luiz Capitulino
  2024-12-17 16:31 ` [PATCH 1/2] mm: alloc_pages_bulk_noprof: drop page_list argument Luiz Capitulino
@ 2024-12-17 16:31 ` Luiz Capitulino
  2024-12-19 13:26   ` David Hildenbrand
From: Luiz Capitulino @ 2024-12-17 16:31 UTC
  To: linux-mm, mgorman, willy; +Cc: david, linux-kernel, lcapitulino

The previous commit removed the page_list argument from
alloc_pages_bulk_noprof() along with the alloc_pages_bulk_list() function.

Now that only the *_array() flavour of the API remains, we can do the
following renaming (along with the _noprof() ones):

  alloc_pages_bulk_array -> alloc_pages_bulk
  alloc_pages_bulk_array_mempolicy -> alloc_pages_bulk_mempolicy
  alloc_pages_bulk_array_node -> alloc_pages_bulk_node
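
A representative call-site change, taking one of the mlx5 hunks below as an
example:

  -	filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill, page_list);
  +	filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill, page_list);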

Signed-off-by: Luiz Capitulino <luizcap@redhat.com>
---
 drivers/staging/media/atomisp/pci/hmm/hmm_bo.c | 12 ++++++------
 drivers/vfio/pci/mlx5/cmd.c                    | 14 +++++++-------
 drivers/vfio/pci/virtio/migrate.c              |  6 +++---
 fs/btrfs/extent_io.c                           |  2 +-
 fs/erofs/zutil.c                               |  4 ++--
 fs/splice.c                                    |  2 +-
 fs/xfs/xfs_buf.c                               |  4 ++--
 include/linux/gfp.h                            | 14 +++++++-------
 kernel/bpf/arena.c                             |  2 +-
 lib/alloc_tag.c                                |  4 ++--
 lib/kunit_iov_iter.c                           |  2 +-
 lib/test_vmalloc.c                             |  2 +-
 mm/mempolicy.c                                 | 14 +++++++-------
 mm/vmalloc.c                                   |  4 ++--
 net/core/page_pool.c                           |  7 +++----
 net/sunrpc/svc.c                               |  4 ++--
 net/sunrpc/svc_xprt.c                          |  3 +--
 17 files changed, 49 insertions(+), 51 deletions(-)

diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
index 07ed33464d711..b191953eaa941 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
@@ -604,7 +604,7 @@ struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
 	return bo;
 }
 
-static void free_pages_bulk_array(unsigned long nr_pages, struct page **page_array)
+static void free_pages_bulk(unsigned long nr_pages, struct page **page_array)
 {
 	unsigned long i;
 
@@ -615,7 +615,7 @@ static void free_pages_bulk_array(unsigned long nr_pages, struct page **page_arr
 static void free_private_bo_pages(struct hmm_buffer_object *bo)
 {
 	set_pages_array_wb(bo->pages, bo->pgnr);
-	free_pages_bulk_array(bo->pgnr, bo->pages);
+	free_pages_bulk(bo->pgnr, bo->pages);
 }
 
 /*Allocate pages which will be used only by ISP*/
@@ -624,17 +624,17 @@ static int alloc_private_pages(struct hmm_buffer_object *bo)
 	const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
 	int ret;
 
-	ret = alloc_pages_bulk_array(gfp, bo->pgnr, bo->pages);
+	ret = alloc_pages_bulk(gfp, bo->pgnr, bo->pages);
 	if (ret != bo->pgnr) {
-		free_pages_bulk_array(ret, bo->pages);
-		dev_err(atomisp_dev, "alloc_pages_bulk_array() failed\n");
+		free_pages_bulk(ret, bo->pages);
+		dev_err(atomisp_dev, "alloc_pages_bulk() failed\n");
 		return -ENOMEM;
 	}
 
 	ret = set_pages_array_uc(bo->pages, bo->pgnr);
 	if (ret) {
 		dev_err(atomisp_dev, "set pages uncacheable failed.\n");
-		free_pages_bulk_array(bo->pgnr, bo->pages);
+		free_pages_bulk(bo->pgnr, bo->pages);
 		return ret;
 	}
 
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index eb7387ee6ebd1..11eda6b207f13 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -408,7 +408,7 @@ void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
 				  buf->dma_dir, 0);
 	}
 
-	/* Undo alloc_pages_bulk_array() */
+	/* Undo alloc_pages_bulk() */
 	for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
 		__free_page(sg_page_iter_page(&sg_iter));
 	sg_free_append_table(&buf->table);
@@ -431,8 +431,8 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
 		return -ENOMEM;
 
 	do {
-		filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
-						page_list);
+		filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill,
+					  page_list);
 		if (!filled) {
 			ret = -ENOMEM;
 			goto err;
@@ -1342,7 +1342,7 @@ static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf)
 {
 	int i;
 
-	/* Undo alloc_pages_bulk_array() */
+	/* Undo alloc_pages_bulk() */
 	for (i = 0; i < recv_buf->npages; i++)
 		__free_page(recv_buf->page_list[i]);
 
@@ -1361,9 +1361,9 @@ static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf,
 		return -ENOMEM;
 
 	for (;;) {
-		filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT,
-						npages - done,
-						recv_buf->page_list + done);
+		filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT,
+					  npages - done,
+					  recv_buf->page_list + done);
 		if (!filled)
 			goto err;
 
diff --git a/drivers/vfio/pci/virtio/migrate.c b/drivers/vfio/pci/virtio/migrate.c
index ee54f4c178577..ba92bb4e9af94 100644
--- a/drivers/vfio/pci/virtio/migrate.c
+++ b/drivers/vfio/pci/virtio/migrate.c
@@ -77,8 +77,8 @@ static int virtiovf_add_migration_pages(struct virtiovf_data_buffer *buf,
 		return -ENOMEM;
 
 	do {
-		filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
-						page_list);
+		filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill,
+					  page_list);
 		if (!filled) {
 			ret = -ENOMEM;
 			goto err;
@@ -112,7 +112,7 @@ static void virtiovf_free_data_buffer(struct virtiovf_data_buffer *buf)
 {
 	struct sg_page_iter sg_iter;
 
-	/* Undo alloc_pages_bulk_array() */
+	/* Undo alloc_pages_bulk() */
 	for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
 		__free_page(sg_page_iter_page(&sg_iter));
 	sg_free_append_table(&buf->table);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b923d0cec61c7..d70e9461fea86 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -632,7 +632,7 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
 	for (allocated = 0; allocated < nr_pages;) {
 		unsigned int last = allocated;
 
-		allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
+		allocated = alloc_pages_bulk(gfp, nr_pages, page_array);
 		if (unlikely(allocated == last)) {
 			/* No progress, fail and do cleanup. */
 			for (int i = 0; i < allocated; i++) {
diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
index 75704f58ecfa9..a71add9b7012b 100644
--- a/fs/erofs/zutil.c
+++ b/fs/erofs/zutil.c
@@ -87,8 +87,8 @@ int z_erofs_gbuf_growsize(unsigned int nrpages)
 			tmp_pages[j] = gbuf->pages[j];
 		do {
 			last = j;
-			j = alloc_pages_bulk_array(GFP_KERNEL, nrpages,
-						   tmp_pages);
+			j = alloc_pages_bulk(GFP_KERNEL, nrpages,
+					     tmp_pages);
 			if (last == j)
 				goto out;
 		} while (j != nrpages);
diff --git a/fs/splice.c b/fs/splice.c
index 2898fa1e9e638..28cfa63aa2364 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -342,7 +342,7 @@ ssize_t copy_splice_read(struct file *in, loff_t *ppos,
 		return -ENOMEM;
 
 	pages = (struct page **)(bv + npages);
-	npages = alloc_pages_bulk_array(GFP_USER, npages, pages);
+	npages = alloc_pages_bulk(GFP_USER, npages, pages);
 	if (!npages) {
 		kfree(bv);
 		return -ENOMEM;
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index aa63b8efd7822..82db3ab0e8b40 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -395,8 +395,8 @@ xfs_buf_alloc_pages(
 	for (;;) {
 		long	last = filled;
 
-		filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
-						bp->b_pages);
+		filled = alloc_pages_bulk(gfp_mask, bp->b_page_count,
+					  bp->b_pages);
 		if (filled == bp->b_page_count) {
 			XFS_STATS_INC(bp->b_mount, xb_page_found);
 			break;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index eebed36443b35..64c3f0729bdc6 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -215,18 +215,18 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 				struct page **page_array);
 #define __alloc_pages_bulk(...)			alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
 
-unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
+unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
 				unsigned long nr_pages,
 				struct page **page_array);
-#define  alloc_pages_bulk_array_mempolicy(...)				\
-	alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__))
+#define  alloc_pages_bulk_mempolicy(...)				\
+	alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__))
 
 /* Bulk allocate order-0 pages */
-#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array)		\
+#define alloc_pages_bulk(_gfp, _nr_pages, _page_array)		\
 	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)
 
 static inline unsigned long
-alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
+alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
 				   struct page **page_array)
 {
 	if (nid == NUMA_NO_NODE)
@@ -235,8 +235,8 @@ alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
 	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
 }
 
-#define alloc_pages_bulk_array_node(...)				\
-	alloc_hooks(alloc_pages_bulk_array_node_noprof(__VA_ARGS__))
+#define alloc_pages_bulk_node(...)				\
+	alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__))
 
 static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
 {
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
index 945a5680f6a54..9927cd4c9e0ea 100644
--- a/kernel/bpf/arena.c
+++ b/kernel/bpf/arena.c
@@ -443,7 +443,7 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt
 			return 0;
 	}
 
-	/* zeroing is needed, since alloc_pages_bulk_array() only fills in non-zero entries */
+	/* zeroing is needed, since alloc_pages_bulk() only fills in non-zero entries */
 	pages = kvcalloc(page_cnt, sizeof(struct page *), GFP_KERNEL);
 	if (!pages)
 		return 0;
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 35f7560a309a4..f419763a743c2 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -410,8 +410,8 @@ static int vm_module_tags_populate(void)
 		unsigned long nr;
 
 		more_pages = ALIGN(module_tags.size - phys_size, PAGE_SIZE) >> PAGE_SHIFT;
-		nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN,
-						 NUMA_NO_NODE, more_pages, next_page);
+		nr = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
+				           NUMA_NO_NODE, more_pages, next_page);
 		if (nr < more_pages ||
 		    vmap_pages_range(addr, addr + (nr << PAGE_SHIFT), PAGE_KERNEL,
 				     next_page, PAGE_SHIFT) < 0) {
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 13e15687675a8..830bf3eca4c2e 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -57,7 +57,7 @@ static void *__init iov_kunit_create_buffer(struct kunit *test,
         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
 	*ppages = pages;
 
-	got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
+	got = alloc_pages_bulk(GFP_KERNEL, npages, pages);
 	if (got != npages) {
 		release_pages(pages, got);
 		KUNIT_ASSERT_EQ(test, got, npages);
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 4ddf769861ff7..f585949ff696e 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -373,7 +373,7 @@ vm_map_ram_test(void)
 	if (!pages)
 		return -1;
 
-	nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages);
+	nr_allocated = alloc_pages_bulk(GFP_KERNEL, map_nr_pages, pages);
 	if (nr_allocated != map_nr_pages)
 		goto cleanup;
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 42a7b07ccc15a..69bc9019368e4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2356,7 +2356,7 @@ struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
 }
 EXPORT_SYMBOL(folio_alloc_noprof);
 
-static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
+static unsigned long alloc_pages_bulk_interleave(gfp_t gfp,
 		struct mempolicy *pol, unsigned long nr_pages,
 		struct page **page_array)
 {
@@ -2391,7 +2391,7 @@ static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
 	return total_allocated;
 }
 
-static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
+static unsigned long alloc_pages_bulk_weighted_interleave(gfp_t gfp,
 		struct mempolicy *pol, unsigned long nr_pages,
 		struct page **page_array)
 {
@@ -2506,7 +2506,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
 	return total_allocated;
 }
 
-static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
+static unsigned long alloc_pages_bulk_preferred_many(gfp_t gfp, int nid,
 		struct mempolicy *pol, unsigned long nr_pages,
 		struct page **page_array)
 {
@@ -2532,7 +2532,7 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
  * It can accelerate memory allocation especially interleaving
  * allocate memory.
  */
-unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
+unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
 		unsigned long nr_pages, struct page **page_array)
 {
 	struct mempolicy *pol = &default_policy;
@@ -2543,15 +2543,15 @@ unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
 		pol = get_task_policy(current);
 
 	if (pol->mode == MPOL_INTERLEAVE)
-		return alloc_pages_bulk_array_interleave(gfp, pol,
+		return alloc_pages_bulk_interleave(gfp, pol,
 							 nr_pages, page_array);
 
 	if (pol->mode == MPOL_WEIGHTED_INTERLEAVE)
-		return alloc_pages_bulk_array_weighted_interleave(
+		return alloc_pages_bulk_weighted_interleave(
 				  gfp, pol, nr_pages, page_array);
 
 	if (pol->mode == MPOL_PREFERRED_MANY)
-		return alloc_pages_bulk_array_preferred_many(gfp,
+		return alloc_pages_bulk_preferred_many(gfp,
 				numa_node_id(), pol, nr_pages, page_array);
 
 	nid = numa_node_id();
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f009b21705c16..0ca555f688812 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3560,11 +3560,11 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
 			 * but mempolicy wants to alloc memory by interleaving.
 			 */
 			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
-				nr = alloc_pages_bulk_array_mempolicy_noprof(gfp,
+				nr = alloc_pages_bulk_mempolicy_noprof(gfp,
 							nr_pages_request,
 							pages + nr_allocated);
 			else
-				nr = alloc_pages_bulk_array_node_noprof(gfp, nid,
+				nr = alloc_pages_bulk_node_noprof(gfp, nid,
 							nr_pages_request,
 							pages + nr_allocated);
 
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index f89cf93f6eb45..8a91c1972dc50 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -532,12 +532,11 @@ static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
 	if (unlikely(pool->alloc.count > 0))
 		return pool->alloc.cache[--pool->alloc.count];
 
-	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
+	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk */
 	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
 
-	nr_pages = alloc_pages_bulk_array_node(gfp,
-					       pool->p.nid, bulk,
-					       (struct page **)pool->alloc.cache);
+	nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk,
+					 (struct page **)pool->alloc.cache);
 	if (unlikely(!nr_pages))
 		return 0;
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 79879b7d39cb4..e7f9c295d13c0 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -651,8 +651,8 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
 	if (pages > RPCSVC_MAXPAGES)
 		pages = RPCSVC_MAXPAGES;
 
-	ret = alloc_pages_bulk_array_node(GFP_KERNEL, node, pages,
-					  rqstp->rq_pages);
+	ret = alloc_pages_bulk_node(GFP_KERNEL, node, pages,
+				    rqstp->rq_pages);
 	return ret == pages;
 }
 
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 43c57124de52f..aebc0d8ddff5c 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -671,8 +671,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
 	}
 
 	for (filled = 0; filled < pages; filled = ret) {
-		ret = alloc_pages_bulk_array(GFP_KERNEL, pages,
-					     rqstp->rq_pages);
+		ret = alloc_pages_bulk(GFP_KERNEL, pages, rqstp->rq_pages);
 		if (ret > filled)
 			/* Made progress, don't sleep yet */
 			continue;
-- 
2.47.1




* Re: [PATCH 1/2] mm: alloc_pages_bulk_noprof: drop page_list argument
  2024-12-17 16:31 ` [PATCH 1/2] mm: alloc_pages_bulk_noprof: drop page_list argument Luiz Capitulino
@ 2024-12-19 13:24   ` David Hildenbrand
  2024-12-19 15:50     ` Luiz Capitulino
From: David Hildenbrand @ 2024-12-19 13:24 UTC
  To: Luiz Capitulino, linux-mm, mgorman, willy; +Cc: linux-kernel, lcapitulino

On 17.12.24 17:31, Luiz Capitulino wrote:
> The commit 387ba26fb1cb added __alloc_pages_bulk() along with the page_list
> argument. The next commit 0f87d9d30f21 added the array-based argument. As

Nit: Use "commit 387ba26fb1cb ("mm/page_alloc: add a bulk page
allocator")", same for the other commit.

(scripts/checkpatch.pl likely complains about this)

> it turns out, the page_list argument has no users in the current tree (if it
> ever had any). Dropping it allows for a slight simplification and eliminates
> some unnecessary checks, now that page_array is required.
> 

It's probably a good idea to link to Mel's patch and the discussion from
2023. Quoting what Willy said back then about the performance of lists vs.
arrays might be valuable to have here as well.


Acked-by: David Hildenbrand <david@redhat.com>

> [...]


-- 
Cheers,

David / dhildenb




* Re: [PATCH 2/2] mm: alloc_pages_bulk: rename API
  2024-12-17 16:31 ` [PATCH 2/2] mm: alloc_pages_bulk: rename API Luiz Capitulino
@ 2024-12-19 13:26   ` David Hildenbrand
From: David Hildenbrand @ 2024-12-19 13:26 UTC
  To: Luiz Capitulino, linux-mm, mgorman, willy; +Cc: linux-kernel, lcapitulino

On 17.12.24 17:31, Luiz Capitulino wrote:
> The previous commit removed the page_list argument from
> alloc_pages_bulk_noprof() along with the alloc_pages_bulk_list() function.
> 
> Now that only the *_array() flavour of the API remains, we can do the
> following renaming (along with the _noprof() ones):
> 
>    alloc_pages_bulk_array -> alloc_pages_bulk
>    alloc_pages_bulk_array_mempolicy -> alloc_pages_bulk_mempolicy
>    alloc_pages_bulk_array_node -> alloc_pages_bulk_node
> 
> Signed-off-by: Luiz Capitulino <luizcap@redhat.com>

Sounds reasonable to me

Acked-by: David Hildenbrand <david@redhat.com>


Interestingly, drivers/staging/media/atomisp/pci/hmm/hmm_bo.c has a 
free_pages_bulk_array() function -- which we should definitely leave 
alone for now :)
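
A generic one would presumably be little more than a short loop; something
like this (untested sketch):

  static void free_pages_bulk(unsigned long nr_pages, struct page **page_array)
  {
  	unsigned long i;

  	for (i = 0; i < nr_pages; i++)
  		if (page_array[i])
  			__free_page(page_array[i]);
  }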

-- 
Cheers,

David / dhildenb




* Re: [PATCH 1/2] mm: alloc_pages_bulk_noprof: drop page_list argument
  2024-12-19 13:24   ` David Hildenbrand
@ 2024-12-19 15:50     ` Luiz Capitulino
From: Luiz Capitulino @ 2024-12-19 15:50 UTC
  To: David Hildenbrand, linux-mm, mgorman, willy; +Cc: linux-kernel, lcapitulino

On 2024-12-19 08:24, David Hildenbrand wrote:
> On 17.12.24 17:31, Luiz Capitulino wrote:
>> The commit 387ba26fb1cb added __alloc_pages_bulk() along with the page_list
>> argument. The next commit 0f87d9d30f21 added the array-based argument. As
> 
> Nit: Use "commit 387ba26fb1cb ("mm/page_alloc: add a bulk page allocator")" ,same for the other commit
> 
> (likely scripts/checkpatch.pl should complain)
> 
>> it turns out, the page_list argument has no users in the current tree (if it
>> ever had any). Dropping it allows for a slight simplification and eliminates
>> some unnecessary checks, now that page_array is required.
>>
> 
> It's probably a good idea to link to Mel's patch and the discussion from 2023. Quoting what Willy said back then about performance of list vs. arrays might be valuable to have here is well.

I'll add these and the other patch's suggestion in v2.

Thanks for the review, David.

> 
> 
> Acked-by: David Hildenbrand <david@redhat.com>
> 
>> [...]



