From: Mike Rapoport <rppt@kernel.org>
To: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	Arnd Bergmann <arnd@arndb.de>,
	David Hildenbrand <david@redhat.com>,
	Nick Piggin <npiggin@gmail.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Vlastimil Babka <vbabka@suse.cz>,
	Mel Gorman <mgorman@techsingularity.net>,
	Uladzislau Rezki <urezki@gmail.com>,
	Christoph Hellwig <hch@infradead.org>
Subject: Re: [PATCH] mm: prefer xxx_page() alloc/free functions for order-0 pages
Date: Fri, 17 Mar 2023 10:24:47 +0200	[thread overview]
Message-ID: <ZBQjz9vzFaLjW0MM@kernel.org> (raw)
In-Reply-To: <50c48ca4789f1da2a65795f2346f5ae3eff7d665.1678710232.git.lstoakes@gmail.com>

On Mon, Mar 13, 2023 at 12:27:14PM +0000, Lorenzo Stoakes wrote:
> Update instances of alloc_pages(..., 0), __get_free_pages(..., 0) and
> __free_pages(..., 0) to use alloc_page(), __get_free_page() and
> __free_page() respectively in core code.
> 
> Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>

Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>

But why limit this only to mm?
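
(For reference, so the thread stays self-contained: the order-0 helpers are
just thin wrappers around the order-taking variants, roughly as defined in
include/linux/gfp.h, so the conversion below does not change behaviour.)

	/* order-0 convenience wrappers, approximately as in include/linux/gfp.h */
	#define alloc_page(gfp_mask)		alloc_pages(gfp_mask, 0)
	#define __get_free_page(gfp_mask)	__get_free_pages((gfp_mask), 0)
	#define __free_page(page)		__free_pages((page), 0)
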
> ---
>  include/asm-generic/pgalloc.h | 4 ++--
>  mm/debug_vm_pgtable.c         | 4 ++--
>  mm/hugetlb_vmemmap.c          | 2 +-
>  mm/mmu_gather.c               | 2 +-
>  mm/page_alloc.c               | 2 +-
>  mm/vmalloc.c                  | 2 +-
>  6 files changed, 8 insertions(+), 8 deletions(-)
> 
> diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
> index 977bea16cf1b..a7cf825befae 100644
> --- a/include/asm-generic/pgalloc.h
> +++ b/include/asm-generic/pgalloc.h
> @@ -123,11 +123,11 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
> 
>  	if (mm == &init_mm)
>  		gfp = GFP_PGTABLE_KERNEL;
> -	page = alloc_pages(gfp, 0);
> +	page = alloc_page(gfp);
>  	if (!page)
>  		return NULL;
>  	if (!pgtable_pmd_page_ctor(page)) {
> -		__free_pages(page, 0);
> +		__free_page(page);
>  		return NULL;
>  	}
>  	return (pmd_t *)page_address(page);
> diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
> index 7887cc2b75bf..4362021b1ce7 100644
> --- a/mm/debug_vm_pgtable.c
> +++ b/mm/debug_vm_pgtable.c
> @@ -1048,7 +1048,7 @@ static void __init destroy_args(struct pgtable_debug_args *args)
> 
>  	if (args->pte_pfn != ULONG_MAX) {
>  		page = pfn_to_page(args->pte_pfn);
> -		__free_pages(page, 0);
> +		__free_page(page);
> 
>  		args->pte_pfn = ULONG_MAX;
>  	}
> @@ -1290,7 +1290,7 @@ static int __init init_args(struct pgtable_debug_args *args)
>  		}
>  	}
> 
> -	page = alloc_pages(GFP_KERNEL, 0);
> +	page = alloc_page(GFP_KERNEL);
>  	if (page)
>  		args->pte_pfn = page_to_pfn(page);
> 
> diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> index a15cc56cf70a..1198064f80eb 100644
> --- a/mm/hugetlb_vmemmap.c
> +++ b/mm/hugetlb_vmemmap.c
> @@ -400,7 +400,7 @@ static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
>  	return 0;
>  out:
>  	list_for_each_entry_safe(page, next, list, lru)
> -		__free_pages(page, 0);
> +		__free_page(page);
>  	return -ENOMEM;
>  }
> 
> diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
> index 2b93cf6ac9ae..ea9683e12936 100644
> --- a/mm/mmu_gather.c
> +++ b/mm/mmu_gather.c
> @@ -32,7 +32,7 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
>  	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
>  		return false;
> 
> -	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
> +	batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
>  	if (!batch)
>  		return false;
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 680a4d76460e..256e8d3c8742 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -5538,7 +5538,7 @@ EXPORT_SYMBOL(__get_free_pages);
> 
>  unsigned long get_zeroed_page(gfp_t gfp_mask)
>  {
> -	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
> +	return __get_free_page(gfp_mask | __GFP_ZERO);
>  }
>  EXPORT_SYMBOL(get_zeroed_page);
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 5e60e9792cbf..978194dc2bb8 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2739,7 +2739,7 @@ void vfree(const void *addr)
>  		 * High-order allocs for huge vmallocs are split, so
>  		 * can be freed as an array of order-0 allocations
>  		 */
> -		__free_pages(page, 0);
> +		__free_page(page);
>  		cond_resched();
>  	}
>  	atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
> --
> 2.39.2
> 

-- 
Sincerely yours,
Mike.

