From: Byungchul Park <byungchul@sk.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: kernel_team@skhynix.com, akpm@linux-foundation.org,
	ying.huang@intel.com, vernhao@tencent.com,
	mgorman@techsingularity.net, hughd@google.com,
	willy@infradead.org, david@redhat.com, peterz@infradead.org,
	luto@kernel.org, tglx@linutronix.de, mingo@redhat.com,
	bp@alien8.de, dave.hansen@linux.intel.com, rjgolo@gmail.com
Subject: [PATCH v11 05/12] mm: buddy: make room for a new variable, ugen, in struct page
Date: Fri, 31 May 2024 18:19:54 +0900
Message-ID: <20240531092001.30428-6-byungchul@sk.com>
In-Reply-To: <20240531092001.30428-1-byungchul@sk.com>

Functionally, no change.  This is a preparation for the luf mechanism,
which tracks whether a tlb flush is needed for each page residing in
buddy, using a generation number in struct page.
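
A generation number of this kind would let buddy compare a page's
recorded unmap generation against the most recently completed flush to
decide whether a tlb flush is still pending.  A hypothetical,
wraparound-safe sketch of such a check (the helper name and the
flushed_gen parameter are assumptions for illustration, not code from
this series):

	/*
	 * Hypothetical sketch, not part of this patch: wraparound-safe
	 * generation comparison.  ugen == 0 is assumed to be reserved
	 * to mean "no flush was deferred for this page".
	 */
	static bool luf_flush_pending(unsigned short ugen,
				      unsigned short flushed_gen)
	{
		return ugen && (short)(ugen - flushed_gen) > 0;
	}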

Fortunately, while a page is in buddy, its private field is used only
to store the page order, which ranges from 0 to MAX_PAGE_ORDER and
therefore fits in an unsigned short int.  So split the field into two
smaller ones, order and ugen, so that both can be used in buddy at the
same time.
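
A minimal userspace sketch of the split layout (illustrative only;
MAX_PAGE_ORDER is hard-coded to its default of 10 here, which is an
assumption) shows that the two unsigned short fields share storage
with private without growing the union on a typical 64-bit target:

	#include <assert.h>
	#include <limits.h>
	#include <stdio.h>

	#define MAX_PAGE_ORDER 10	/* default value; an assumption here */

	union priv {
		unsigned long private;
		struct {
			unsigned short order;	/* buddy order */
			unsigned short ugen;	/* unmap generation */
		};
	};

	int main(void)
	{
		union priv p = { .private = 0 };

		/* The buddy order range fits in an unsigned short. */
		static_assert(MAX_PAGE_ORDER <= USHRT_MAX, "order fits");

		p.order = MAX_PAGE_ORDER;
		p.ugen = 42;
		printf("sizeof = %zu, order = %hu, ugen = %hu\n",
		       sizeof(p), p.order, p.ugen);
		return 0;
	}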

Signed-off-by: Byungchul Park <byungchul@sk.com>
---
 include/linux/mm_types.h | 40 +++++++++++++++++++++++++++++++++-------
 mm/internal.h            |  4 ++--
 mm/page_alloc.c          | 13 ++++++++-----
 3 files changed, 43 insertions(+), 14 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 24323c7d0bd4..37eb3000267c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -108,13 +108,25 @@ struct page {
 				pgoff_t index;		/* Our offset within mapping. */
 				unsigned long share;	/* share count for fsdax */
 			};
-			/**
-			 * @private: Mapping-private opaque data.
-			 * Usually used for buffer_heads if PagePrivate.
-			 * Used for swp_entry_t if PageSwapCache.
-			 * Indicates order in the buddy system if PageBuddy.
-			 */
-			unsigned long private;
+			union {
+				/**
+				 * @private: Mapping-private opaque data.
+				 * Usually used for buffer_heads if PagePrivate.
+				 * Used for swp_entry_t if PageSwapCache.
+				 */
+				unsigned long private;
+				struct {
+					/*
+					 * Indicates order in the buddy system if PageBuddy.
+					 */
+					unsigned short int order;
+					/*
+					 * Tracks whether a tlb flush is needed,
+					 * used by luf (lazy unmap flush).
+					 */
+					unsigned short int ugen;
+				};
+			};
 		};
 		struct {	/* page_pool used by netstack */
 			/**
@@ -521,6 +533,20 @@ static inline void set_page_private(struct page *page, unsigned long private)
 	page->private = private;
 }
 
+#define page_buddy_order(page)		((page)->order)
+
+static inline void set_page_buddy_order(struct page *page, unsigned int order)
+{
+	page->order = (unsigned short int)order;
+}
+
+#define page_buddy_ugen(page)		((page)->ugen)
+
+static inline void set_page_buddy_ugen(struct page *page, unsigned short int ugen)
+{
+	page->ugen = ugen;
+}
+
 static inline void *folio_get_private(struct folio *folio)
 {
 	return folio->private;
diff --git a/mm/internal.h b/mm/internal.h
index bbec99cc9d9d..552e1061d36d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -461,7 +461,7 @@ struct alloc_context {
 static inline unsigned int buddy_order(struct page *page)
 {
 	/* PageBuddy() must be checked by the caller */
-	return page_private(page);
+	return page_buddy_order(page);
 }
 
 /*
@@ -475,7 +475,7 @@ static inline unsigned int buddy_order(struct page *page)
  * times, potentially observing different values in the tests and the actual
  * use of the result.
  */
-#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
+#define buddy_order_unsafe(page)	READ_ONCE(page_buddy_order(page))
 
 /*
  * This function checks whether a page is free && is the buddy
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b1e3eb5787de..ae57dd8718fe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -565,9 +565,12 @@ void prep_compound_page(struct page *page, unsigned int order)
 	prep_compound_head(page, order);
 }
 
-static inline void set_buddy_order(struct page *page, unsigned int order)
+static inline void set_buddy_order_ugen(struct page *page,
+					unsigned int order,
+					unsigned short int ugen)
 {
-	set_page_private(page, order);
+	set_page_buddy_order(page, order);
+	set_page_buddy_ugen(page, ugen);
 	__SetPageBuddy(page);
 }
 
@@ -826,7 +829,7 @@ static inline void __free_one_page(struct page *page,
 	}
 
 done_merging:
-	set_buddy_order(page, order);
+	set_buddy_order_ugen(page, order, 0);
 
 	if (fpi_flags & FPI_TO_TAIL)
 		to_tail = true;
@@ -1336,7 +1339,7 @@ static inline void expand(struct zone *zone, struct page *page,
 			continue;
 
 		__add_to_free_list(&page[size], zone, high, migratetype, false);
-		set_buddy_order(&page[size], high);
+		set_buddy_order_ugen(&page[size], high, 0);
 		nr_added += size;
 	}
 	account_freepages(zone, nr_added, migratetype);
@@ -6801,7 +6804,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
 			continue;
 
 		add_to_free_list(current_buddy, zone, high, migratetype, false);
-		set_buddy_order(current_buddy, high);
+		set_buddy_order_ugen(current_buddy, high, 0);
 	}
 }
 
-- 
2.17.1



Thread overview: 36+ messages
2024-05-31  9:19 [PATCH v11 00/12] LUF(Lazy Unmap Flush) reducing tlb numbers over 90% Byungchul Park
2024-05-31  9:19 ` [PATCH v11 01/12] x86/tlb: add APIs manipulating tlb batch's arch data Byungchul Park
2024-05-31  9:19 ` [PATCH v11 02/12] arm64: tlbflush: " Byungchul Park
2024-05-31  9:19 ` [PATCH v11 03/12] riscv, tlb: " Byungchul Park
2024-05-31  9:19 ` [PATCH v11 04/12] x86/tlb, riscv/tlb, mm/rmap: separate arch_tlbbatch_clear() out of arch_tlbbatch_flush() Byungchul Park
2024-05-31  9:19 ` Byungchul Park [this message]
2024-05-31  9:19 ` [PATCH v11 06/12] mm: add folio_put_ugen() to deliver unmap generation number to pcp or buddy Byungchul Park
2024-05-31  9:19 ` [PATCH v11 07/12] mm: add a parameter, unmap generation number, to free_unref_folios() Byungchul Park
2024-05-31  9:19 ` [PATCH v11 08/12] mm/rmap: recognize read-only tlb entries during batched tlb flush Byungchul Park
2024-05-31  9:19 ` [PATCH v11 09/12] mm: implement LUF(Lazy Unmap Flush) defering tlb flush when folios get unmapped Byungchul Park
2024-05-31 16:12   ` Dave Hansen
2024-05-31 18:04     ` Byungchul Park
2024-05-31 21:46       ` Dave Hansen
2024-05-31 22:09         ` Matthew Wilcox
2024-06-01  2:20         ` Byungchul Park
2024-06-01  7:22         ` David Hildenbrand
2024-06-03  9:35           ` Byungchul Park
2024-06-03 13:23             ` Dave Hansen
2024-06-03 16:05               ` David Hildenbrand
2024-06-03 16:37                 ` Dave Hansen
2024-06-03 17:01                   ` Matthew Wilcox
2024-06-03 18:00                     ` David Hildenbrand
2024-06-04  8:16                       ` Huang, Ying
2024-06-04  0:34                     ` Byungchul Park
2024-06-10 13:23                       ` Michal Hocko
2024-06-11  0:55                         ` Byungchul Park
2024-06-11 11:55                           ` Michal Hocko
2024-06-14  2:45                             ` Byungchul Park
2024-06-04  1:53               ` Byungchul Park
2024-06-04  4:43                 ` Byungchul Park
2024-06-06  8:33                   ` David Hildenbrand
2024-06-14  1:57                 ` Byungchul Park
2024-06-11  9:12               ` Byungchul Park
2024-05-31  9:19 ` [PATCH v11 10/12] mm: separate move/undo parts from migrate_pages_batch() Byungchul Park
2024-05-31  9:20 ` [PATCH v11 11/12] mm, migrate: apply luf mechanism to unmapping during migration Byungchul Park
2024-05-31  9:20 ` [PATCH v11 12/12] mm, vmscan: apply luf mechanism to unmapping during folio reclaim Byungchul Park
