From: Yafang Shao <laoar.shao@gmail.com>
To: akpm@linux-foundation.org, ast@kernel.org, daniel@iogearbox.net,
andrii@kernel.org
Cc: bpf@vger.kernel.org, linux-mm@kvack.org,
Yafang Shao <laoar.shao@gmail.com>
Subject: [RFC PATCH 2/4] mm: pass VMA parameter to hugepage_global_{enabled,always}()
Date: Tue, 29 Apr 2025 10:41:37 +0800
Message-ID: <20250429024139.34365-3-laoar.shao@gmail.com>
In-Reply-To: <20250429024139.34365-1-laoar.shao@gmail.com>
Pass a new @vma parameter to hugepage_global_{enabled,always}() and to
khugepaged's hugepage_pmd_enabled(). The parameter is not used yet; this
patch only threads it through. A subsequent patch will use it to decide
whether THP can be used for the given VMA. Callers that run outside the
context of any particular VMA (khugepaged's system-wide paths) pass NULL.
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 mm/huge_memory.c |  8 ++++----
 mm/internal.h    |  8 ++++++--
 mm/khugepaged.c  | 18 +++++++++---------
 3 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 39afa14af2f2..7a4a968c7874 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -176,8 +176,8 @@ static unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
 	 * were already handled in thp_vma_allowable_orders().
 	 */
 	if (enforce_sysfs &&
-	    (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
-					    !hugepage_global_always())))
+	    (!hugepage_global_enabled(vma) || (!(vm_flags & VM_HUGEPAGE) &&
+					       !hugepage_global_always(vma))))
 		return 0;
 
 	/*
@@ -234,8 +234,8 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
 
 		if (vm_flags & VM_HUGEPAGE)
 			mask |= READ_ONCE(huge_anon_orders_madvise);
-		if (hugepage_global_always() ||
-		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
+		if (hugepage_global_always(vma) ||
+		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled(vma)))
 			mask |= READ_ONCE(huge_anon_orders_inherit);
 
 		orders &= mask;
diff --git a/mm/internal.h b/mm/internal.h
index 462d85c2ba7b..aa698a11dd68 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1626,14 +1626,18 @@ static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
 #endif /* CONFIG_PT_RECLAIM */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline bool hugepage_global_enabled(void)
+/*
+ * Checks whether a given @vma can use THP. If @vma is NULL, the check is
+ * performed globally by khugepaged during a system-wide scan.
+ */
+static inline bool hugepage_global_enabled(struct vm_area_struct *vma)
 {
 	return transparent_hugepage_flags &
 			((1<<TRANSPARENT_HUGEPAGE_FLAG) |
 			 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
 }
 
-static inline bool hugepage_global_always(void)
+static inline bool hugepage_global_always(struct vm_area_struct *vma)
 {
 	return transparent_hugepage_flags &
 			(1<<TRANSPARENT_HUGEPAGE_FLAG);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index cc945c6ab3bd..b85e36ddd7db 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -413,7 +413,7 @@ static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
 	       test_bit(MMF_DISABLE_THP, &mm->flags);
 }
 
-static bool hugepage_pmd_enabled(void)
+static bool hugepage_pmd_enabled(struct vm_area_struct *vma)
 {
 	/*
 	 * We cover the anon, shmem and the file-backed case here; file-backed
@@ -423,14 +423,14 @@ static bool hugepage_pmd_enabled(void)
 	 * except when the global shmem_huge is set to SHMEM_HUGE_DENY.
 	 */
 	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
-	    hugepage_global_enabled())
+	    hugepage_global_enabled(vma))
 		return true;
 	if (test_bit(PMD_ORDER, &huge_anon_orders_always))
 		return true;
 	if (test_bit(PMD_ORDER, &huge_anon_orders_madvise))
 		return true;
 	if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
-	    hugepage_global_enabled())
+	    hugepage_global_enabled(vma))
 		return true;
 	if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled())
 		return true;
@@ -473,7 +473,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
 			  unsigned long vm_flags)
 {
 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
-	    hugepage_pmd_enabled()) {
+	    hugepage_pmd_enabled(vma)) {
 		if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
 					    PMD_ORDER))
 			__khugepaged_enter(vma->vm_mm);
@@ -2516,7 +2516,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 
 static int khugepaged_has_work(void)
 {
-	return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled();
+	return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled(NULL);
 }
 
 static int khugepaged_wait_event(void)
@@ -2589,7 +2589,7 @@ static void khugepaged_wait_work(void)
 		return;
 	}
 
-	if (hugepage_pmd_enabled())
+	if (hugepage_pmd_enabled(NULL))
 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
 }
 
@@ -2620,7 +2620,7 @@ static void set_recommended_min_free_kbytes(void)
 	int nr_zones = 0;
 	unsigned long recommended_min;
 
-	if (!hugepage_pmd_enabled()) {
+	if (!hugepage_pmd_enabled(NULL)) {
 		calculate_min_free_kbytes();
 		goto update_wmarks;
 	}
@@ -2670,7 +2670,7 @@ int start_stop_khugepaged(void)
 	int err = 0;
 
 	mutex_lock(&khugepaged_mutex);
-	if (hugepage_pmd_enabled()) {
+	if (hugepage_pmd_enabled(NULL)) {
 		if (!khugepaged_thread)
 			khugepaged_thread = kthread_run(khugepaged, NULL,
 							"khugepaged");
@@ -2696,7 +2696,7 @@ int start_stop_khugepaged(void)
 void khugepaged_min_free_kbytes_update(void)
 {
 	mutex_lock(&khugepaged_mutex);
-	if (hugepage_pmd_enabled() && khugepaged_thread)
+	if (hugepage_pmd_enabled(NULL) && khugepaged_thread)
 		set_recommended_min_free_kbytes();
 	mutex_unlock(&khugepaged_mutex);
 }
--
2.43.5