From: Yafang Shao <laoar.shao@gmail.com>
To: akpm@linux-foundation.org, ast@kernel.org, daniel@iogearbox.net,
	andrii@kernel.org
Cc: bpf@vger.kernel.org, linux-mm@kvack.org,
	Yafang Shao <laoar.shao@gmail.com>
Subject: [RFC PATCH 1/4] mm: move hugepage_global_{enabled,always}() to internal.h
Date: Tue, 29 Apr 2025 10:41:36 +0800
Message-ID: <20250429024139.34365-2-laoar.shao@gmail.com>
In-Reply-To: <20250429024139.34365-1-laoar.shao@gmail.com>

The helpers hugepage_global_{enabled,always}() are only needed by core MM
code, so move them from include/linux/huge_mm.h to mm/internal.h. Their sole
remaining user in the header, the inline thp_vma_allowable_orders(), moves
out of line into mm/huge_memory.c accordingly, and __thp_vma_allowable_orders()
becomes static. These helpers will also be exposed for BPF hooking in a
future change.
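
For illustration only (not part of this patch): thp_vma_allowable_orders()
is now an out-of-line call, but its callers are unchanged. A hypothetical
caller sketch, where the TVA flag combination and the use of BIT(PMD_ORDER)
are illustrative assumptions rather than code taken from this series:

	/*
	 * Hypothetical caller sketch: orders form a bitfield (bit N set
	 * means order N is allowed), so passing BIT(PMD_ORDER) asks
	 * whether a PMD-sized THP is permitted for this VMA under the
	 * current sysfs policy.
	 */
	unsigned long orders;

	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
					  TVA_IN_PF | TVA_ENFORCE_SYSFS,
					  BIT(PMD_ORDER));
	if (orders & BIT(PMD_ORDER)) {
		/* a PMD-sized huge page may be used for this fault */
	}

The behaviour is unchanged by this patch; only the inline/out-of-line split
of the helpers moves.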

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 include/linux/huge_mm.h | 54 +----------------------------------------
 mm/huge_memory.c        | 46 ++++++++++++++++++++++++++++++++---
 mm/internal.h           | 14 +++++++++++
 3 files changed, 57 insertions(+), 57 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e893d546a49f..5e92db48fc99 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -177,19 +177,6 @@ extern unsigned long huge_anon_orders_always;
 extern unsigned long huge_anon_orders_madvise;
 extern unsigned long huge_anon_orders_inherit;
 
-static inline bool hugepage_global_enabled(void)
-{
-	return transparent_hugepage_flags &
-			((1<<TRANSPARENT_HUGEPAGE_FLAG) |
-			(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
-}
-
-static inline bool hugepage_global_always(void)
-{
-	return transparent_hugepage_flags &
-			(1<<TRANSPARENT_HUGEPAGE_FLAG);
-}
-
 static inline int highest_order(unsigned long orders)
 {
 	return fls_long(orders) - 1;
@@ -260,49 +247,10 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
 	return orders;
 }
 
-unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
-					 unsigned long vm_flags,
-					 unsigned long tva_flags,
-					 unsigned long orders);
-
-/**
- * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
- * @vma:  the vm area to check
- * @vm_flags: use these vm_flags instead of vma->vm_flags
- * @tva_flags: Which TVA flags to honour
- * @orders: bitfield of all orders to consider
- *
- * Calculates the intersection of the requested hugepage orders and the allowed
- * hugepage orders for the provided vma. Permitted orders are encoded as a set
- * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
- * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
- *
- * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
- * orders are allowed.
- */
-static inline
 unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
 				       unsigned long vm_flags,
 				       unsigned long tva_flags,
-				       unsigned long orders)
-{
-	/* Optimization to check if required orders are enabled early. */
-	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
-		unsigned long mask = READ_ONCE(huge_anon_orders_always);
-
-		if (vm_flags & VM_HUGEPAGE)
-			mask |= READ_ONCE(huge_anon_orders_madvise);
-		if (hugepage_global_always() ||
-		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
-			mask |= READ_ONCE(huge_anon_orders_inherit);
-
-		orders &= mask;
-		if (!orders)
-			return 0;
-	}
-
-	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
-}
+				       unsigned long orders);
 
 struct thpsize {
 	struct kobject kobj;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2a47682d1ab7..39afa14af2f2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -98,10 +98,10 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
 	return !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
 }
 
-unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
-					 unsigned long vm_flags,
-					 unsigned long tva_flags,
-					 unsigned long orders)
+static unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+						unsigned long vm_flags,
+						unsigned long tva_flags,
+						unsigned long orders)
 {
 	bool smaps = tva_flags & TVA_SMAPS;
 	bool in_pf = tva_flags & TVA_IN_PF;
@@ -208,6 +208,44 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
 	return orders;
 }
 
+/**
+ * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
+ * @vma:  the vm area to check
+ * @vm_flags: use these vm_flags instead of vma->vm_flags
+ * @tva_flags: Which TVA flags to honour
+ * @orders: bitfield of all orders to consider
+ *
+ * Calculates the intersection of the requested hugepage orders and the allowed
+ * hugepage orders for the provided vma. Permitted orders are encoded as a set
+ * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
+ * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
+ *
+ * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
+ * orders are allowed.
+ */
+unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+				       unsigned long vm_flags,
+				       unsigned long tva_flags,
+				       unsigned long orders)
+{
+	/* Optimization to check if required orders are enabled early. */
+	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
+		unsigned long mask = READ_ONCE(huge_anon_orders_always);
+
+		if (vm_flags & VM_HUGEPAGE)
+			mask |= READ_ONCE(huge_anon_orders_madvise);
+		if (hugepage_global_always() ||
+		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
+			mask |= READ_ONCE(huge_anon_orders_inherit);
+
+		orders &= mask;
+		if (!orders)
+			return 0;
+	}
+
+	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
+}
+
 static bool get_huge_zero_page(void)
 {
 	struct folio *zero_folio;
diff --git a/mm/internal.h b/mm/internal.h
index e9695baa5922..462d85c2ba7b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1625,5 +1625,19 @@ static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
 }
 #endif /* CONFIG_PT_RECLAIM */
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline bool hugepage_global_enabled(void)
+{
+	return transparent_hugepage_flags &
+			((1<<TRANSPARENT_HUGEPAGE_FLAG) |
+			(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
+}
+
+static inline bool hugepage_global_always(void)
+{
+	return transparent_hugepage_flags &
+			(1<<TRANSPARENT_HUGEPAGE_FLAG);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif	/* __MM_INTERNAL_H */
-- 
2.43.5




Thread overview: 41+ messages
2025-04-29  2:41 [RFC PATCH 0/4] mm, bpf: BPF based THP adjustment Yafang Shao
2025-04-29  2:41 ` Yafang Shao [this message]
2025-04-29 15:13   ` [RFC PATCH 1/4] mm: move hugepage_global_{enabled,always}() to internal.h Zi Yan
2025-04-30  2:40     ` Yafang Shao
2025-04-30 12:11       ` Zi Yan
2025-04-30 14:43         ` Yafang Shao
2025-04-29  2:41 ` [RFC PATCH 2/4] mm: pass VMA parameter to hugepage_global_{enabled,always}() Yafang Shao
2025-04-29 15:31   ` Zi Yan
2025-04-30  2:46     ` Yafang Shao
2025-04-29  2:41 ` [RFC PATCH 3/4] mm: add BPF hook for THP adjustment Yafang Shao
2025-04-29 15:19   ` Alexei Starovoitov
2025-04-30  2:48     ` Yafang Shao
2025-04-29  2:41 ` [RFC PATCH 4/4] selftests/bpf: Add selftest " Yafang Shao
2025-04-29  3:11 ` [RFC PATCH 0/4] mm, bpf: BPF based " Matthew Wilcox
2025-04-29  4:53   ` Yafang Shao
2025-04-29 15:09 ` Zi Yan
2025-04-30  2:33   ` Yafang Shao
2025-04-30 13:19     ` Zi Yan
2025-04-30 14:38       ` Yafang Shao
2025-04-30 15:00         ` Zi Yan
2025-04-30 15:16           ` Yafang Shao
2025-04-30 15:21           ` Liam R. Howlett
2025-04-30 15:37             ` Yafang Shao
2025-04-30 15:53               ` Liam R. Howlett
2025-04-30 16:06                 ` Yafang Shao
2025-04-30 17:45                   ` Johannes Weiner
2025-04-30 17:53                     ` Zi Yan
2025-05-01 19:36                       ` Gutierrez Asier
2025-05-02  5:48                         ` Yafang Shao
2025-05-02 12:00                           ` Zi Yan
2025-05-02 12:18                             ` Yafang Shao
2025-05-02 13:04                               ` David Hildenbrand
2025-05-02 13:06                                 ` Matthew Wilcox
2025-05-02 13:34                                 ` Zi Yan
2025-05-05  2:35                                 ` Yafang Shao
2025-05-05  9:11                           ` Gutierrez Asier
2025-05-05  9:38                             ` Yafang Shao
2025-04-30 17:59         ` Johannes Weiner
2025-05-01  0:40           ` Yafang Shao
2025-04-30 14:40     ` Liam R. Howlett
2025-04-30 14:49       ` Yafang Shao
