* [PATCH] mm/mm_init.c: simplify logic of deferred_[init|free]_pages
@ 2024-06-12 2:04 Wei Yang
2024-06-13 6:07 ` Mike Rapoport
[not found] ` <d5d75ca9-e5be-45a4-9ce8-1c46885c099b@redhat.com>
0 siblings, 2 replies; 3+ messages in thread
From: Wei Yang @ 2024-06-12 2:04 UTC (permalink / raw)
To: rppt, akpm; +Cc: linux-mm, Wei Yang, Kirill A . Shutemov, David Hildenbrand
Functions deferred_[init|free]_pages are only used in
deferred_init_maxorder(), which makes sure the range to init/free is
within MAX_ORDER_NR_PAGES size.
With this knowledge, we can simplify these two functions, since:
* only the first pfn could be IS_MAX_ORDER_ALIGNED()
Also, since the range passed to deferred_[init|free]_pages is always from
memblock.memory for those we have already allocated memmap to cover,
pfn_valid() always returns true. Then we can remove the related check.
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
CC: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
CC: Mike Rapoport (IBM) <rppt@kernel.org>
CC: David Hildenbrand <david@redhat.com>
---
mm/mm_init.c | 63 +++++++---------------------------------------------
1 file changed, 8 insertions(+), 55 deletions(-)
diff --git a/mm/mm_init.c b/mm/mm_init.c
index c152c60eca3d..63d70fc60705 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1911,7 +1911,7 @@ unsigned long __init node_map_pfn_alignment(void)
}
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-static void __init deferred_free_range(unsigned long pfn,
+static void __init deferred_free_pages(unsigned long pfn,
unsigned long nr_pages)
{
struct page *page;
@@ -1950,69 +1950,22 @@ static inline void __init pgdat_init_report_one_done(void)
complete(&pgdat_init_all_done_comp);
}
-/*
- * Returns true if page needs to be initialized or freed to buddy allocator.
- *
- * We check if a current MAX_PAGE_ORDER block is valid by only checking the
- * validity of the head pfn.
- */
-static inline bool __init deferred_pfn_valid(unsigned long pfn)
-{
- if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
- return false;
- return true;
-}
-
-/*
- * Free pages to buddy allocator. Try to free aligned pages in
- * MAX_ORDER_NR_PAGES sizes.
- */
-static void __init deferred_free_pages(unsigned long pfn,
- unsigned long end_pfn)
-{
- unsigned long nr_free = 0;
-
- for (; pfn < end_pfn; pfn++) {
- if (!deferred_pfn_valid(pfn)) {
- deferred_free_range(pfn - nr_free, nr_free);
- nr_free = 0;
- } else if (IS_MAX_ORDER_ALIGNED(pfn)) {
- deferred_free_range(pfn - nr_free, nr_free);
- nr_free = 1;
- } else {
- nr_free++;
- }
- }
- /* Free the last block of pages to allocator */
- deferred_free_range(pfn - nr_free, nr_free);
-}
-
/*
* Initialize struct pages. We minimize pfn page lookups and scheduler checks
* by performing it only once every MAX_ORDER_NR_PAGES.
* Return number of pages initialized.
*/
-static unsigned long __init deferred_init_pages(struct zone *zone,
- unsigned long pfn,
- unsigned long end_pfn)
+static unsigned long __init deferred_init_pages(struct zone *zone,
+ unsigned long pfn,
+ unsigned long end_pfn)
{
int nid = zone_to_nid(zone);
- unsigned long nr_pages = 0;
+ unsigned long nr_pages = end_pfn - pfn;
int zid = zone_idx(zone);
- struct page *page = NULL;
+ struct page *page = pfn_to_page(pfn);
- for (; pfn < end_pfn; pfn++) {
- if (!deferred_pfn_valid(pfn)) {
- page = NULL;
- continue;
- } else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
- page = pfn_to_page(pfn);
- } else {
- page++;
- }
+ for (; pfn < end_pfn; pfn++, page++)
__init_single_page(page, pfn, zid, nid);
- nr_pages++;
- }
return nr_pages;
}
@@ -2096,7 +2049,7 @@ deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
break;
t = min(mo_pfn, epfn);
- deferred_free_pages(spfn, t);
+ deferred_free_pages(spfn, t - spfn);
if (mo_pfn <= epfn)
break;
--
2.34.1
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] mm/mm_init.c: simplify logic of deferred_[init|free]_pages
2024-06-12 2:04 [PATCH] mm/mm_init.c: simplify logic of deferred_[init|free]_pages Wei Yang
@ 2024-06-13 6:07 ` Mike Rapoport
[not found] ` <d5d75ca9-e5be-45a4-9ce8-1c46885c099b@redhat.com>
1 sibling, 0 replies; 3+ messages in thread
From: Mike Rapoport @ 2024-06-13 6:07 UTC (permalink / raw)
To: Wei Yang; +Cc: akpm, linux-mm, Kirill A . Shutemov, David Hildenbrand
On Wed, Jun 12, 2024 at 02:04:21AM +0000, Wei Yang wrote:
> Function deferred_[init|free]_pages are only used in
> deferred_init_maxorder(), which makes sure the range to init/free is
> within MAX_ORDER_NR_PAGES size.
>
> With this knowledge, we can simplify these two functions. Since
>
> * only the first pfn could be IS_MAX_ORDER_ALIGNED()
>
> Also since the range passed to deferred_[init|free]_pages is always from
> memblock.memory for those we have already allocated memmap to cover,
> pfn_valid() always return true. Then we can remove related check.
>
> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
> CC: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> CC: Mike Rapoport (IBM) <rppt@kernel.org>
> CC: David Hildenbrand <david@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
> ---
> mm/mm_init.c | 63 +++++++---------------------------------------------
> 1 file changed, 8 insertions(+), 55 deletions(-)
>
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index c152c60eca3d..63d70fc60705 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -1911,7 +1911,7 @@ unsigned long __init node_map_pfn_alignment(void)
> }
>
> #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
> -static void __init deferred_free_range(unsigned long pfn,
> +static void __init deferred_free_pages(unsigned long pfn,
> unsigned long nr_pages)
> {
> struct page *page;
> @@ -1950,69 +1950,22 @@ static inline void __init pgdat_init_report_one_done(void)
> complete(&pgdat_init_all_done_comp);
> }
>
> -/*
> - * Returns true if page needs to be initialized or freed to buddy allocator.
> - *
> - * We check if a current MAX_PAGE_ORDER block is valid by only checking the
> - * validity of the head pfn.
> - */
> -static inline bool __init deferred_pfn_valid(unsigned long pfn)
> -{
> - if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
> - return false;
> - return true;
> -}
> -
> -/*
> - * Free pages to buddy allocator. Try to free aligned pages in
> - * MAX_ORDER_NR_PAGES sizes.
> - */
> -static void __init deferred_free_pages(unsigned long pfn,
> - unsigned long end_pfn)
> -{
> - unsigned long nr_free = 0;
> -
> - for (; pfn < end_pfn; pfn++) {
> - if (!deferred_pfn_valid(pfn)) {
> - deferred_free_range(pfn - nr_free, nr_free);
> - nr_free = 0;
> - } else if (IS_MAX_ORDER_ALIGNED(pfn)) {
> - deferred_free_range(pfn - nr_free, nr_free);
> - nr_free = 1;
> - } else {
> - nr_free++;
> - }
> - }
> - /* Free the last block of pages to allocator */
> - deferred_free_range(pfn - nr_free, nr_free);
> -}
> -
> /*
> * Initialize struct pages. We minimize pfn page lookups and scheduler checks
> * by performing it only once every MAX_ORDER_NR_PAGES.
> * Return number of pages initialized.
> */
> -static unsigned long __init deferred_init_pages(struct zone *zone,
> - unsigned long pfn,
> - unsigned long end_pfn)
> +static unsigned long __init deferred_init_pages(struct zone *zone,
> + unsigned long pfn,
> + unsigned long end_pfn)
> {
> int nid = zone_to_nid(zone);
> - unsigned long nr_pages = 0;
> + unsigned long nr_pages = end_pfn - pfn;
> int zid = zone_idx(zone);
> - struct page *page = NULL;
> + struct page *page = pfn_to_page(pfn);
>
> - for (; pfn < end_pfn; pfn++) {
> - if (!deferred_pfn_valid(pfn)) {
> - page = NULL;
> - continue;
> - } else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
> - page = pfn_to_page(pfn);
> - } else {
> - page++;
> - }
> + for (; pfn < end_pfn; pfn++, page++)
> __init_single_page(page, pfn, zid, nid);
> - nr_pages++;
> - }
> return nr_pages;
> }
>
> @@ -2096,7 +2049,7 @@ deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
> break;
>
> t = min(mo_pfn, epfn);
> - deferred_free_pages(spfn, t);
> + deferred_free_pages(spfn, t - spfn);
>
> if (mo_pfn <= epfn)
> break;
> --
> 2.34.1
>
--
Sincerely yours,
Mike.
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] mm/mm_init.c: simplify logic of deferred_[init|free]_pages
[not found] ` <d5d75ca9-e5be-45a4-9ce8-1c46885c099b@redhat.com>
@ 2024-06-13 11:32 ` Wei Yang
0 siblings, 0 replies; 3+ messages in thread
From: Wei Yang @ 2024-06-13 11:32 UTC (permalink / raw)
To: David Hildenbrand; +Cc: Wei Yang, rppt, akpm, linux-mm, Kirill A . Shutemov
On Thu, Jun 13, 2024 at 10:21:08AM +0200, David Hildenbrand wrote:
>On 12.06.24 04:04, Wei Yang wrote:
>> Function deferred_[init|free]_pages are only used in
>> deferred_init_maxorder(), which makes sure the range to init/free is
>> within MAX_ORDER_NR_PAGES size.
>>
>> With this knowledge, we can simplify these two functions. Since
>>
>> * only the first pfn could be IS_MAX_ORDER_ALIGNED()
>>
>> Also since the range passed to deferred_[init|free]_pages is always from
>> memblock.memory for those we have already allocated memmap to cover,
>> pfn_valid() always return true. Then we can remove related check.
>>
>
>I'm surprised that we can completely get rid if the pfn_valid checks (which
>is great!), trusting Mike's review that this is all sane :)
>
I'm surprised too :-)
>> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
>> CC: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
>> CC: Mike Rapoport (IBM) <rppt@kernel.org>
>> CC: David Hildenbrand <david@redhat.com>
>> ---
>
>[...]
>
>> /*
>> * Initialize struct pages. We minimize pfn page lookups and scheduler checks
>> * by performing it only once every MAX_ORDER_NR_PAGES.
>> * Return number of pages initialized.
>> */
>> -static unsigned long __init deferred_init_pages(struct zone *zone,
>> - unsigned long pfn,
>> - unsigned long end_pfn)
>> +static unsigned long __init deferred_init_pages(struct zone *zone,
>> + unsigned long pfn,
>> + unsigned long end_pfn)
>
>We nowadays prefer double tabs for indentation for the second+ parameter line
>in MM, like
>
>static unsigned long __init deferred_init_pages(struct zone *zone,
> unsigned long pfn, unsigned long end_pfn)
>
>Usually results in less churn when renaming functions ... and reduces the
>LOC.
>
Will adjust it.
>
>> {
>> int nid = zone_to_nid(zone);
>> - unsigned long nr_pages = 0;
>> + unsigned long nr_pages = end_pfn - pfn;
>> int zid = zone_idx(zone);
>> - struct page *page = NULL;
>> + struct page *page = pfn_to_page(pfn);
>> - for (; pfn < end_pfn; pfn++) {
>> - if (!deferred_pfn_valid(pfn)) {
>> - page = NULL;
>> - continue;
>> - } else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
>> - page = pfn_to_page(pfn);
>> - } else {
>> - page++;
>> - }
>> + for (; pfn < end_pfn; pfn++, page++)
>> __init_single_page(page, pfn, zid, nid);
>> - nr_pages++;
>> - }
>> return nr_pages;
>> }
>> @@ -2096,7 +2049,7 @@ deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
>> break;
>> t = min(mo_pfn, epfn);
>> - deferred_free_pages(spfn, t);
>> + deferred_free_pages(spfn, t - spfn);
>> if (mo_pfn <= epfn)
>> break;
>
>Looks like a really nice cleanup!
>
Glad you like it :-)
>--
>Cheers,
>
>David / dhildenb
--
Wei Yang
Help you, Help me
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2024-06-13 11:32 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-06-12 2:04 [PATCH] mm/mm_init.c: simplify logic of deferred_[init|free]_pages Wei Yang
2024-06-13 6:07 ` Mike Rapoport
[not found] ` <d5d75ca9-e5be-45a4-9ce8-1c46885c099b@redhat.com>
2024-06-13 11:32 ` Wei Yang
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox