From: Kefeng Wang <wangkefeng.wang@huawei.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>, <linux-mm@kvack.org>,
David Hildenbrand <david@redhat.com>,
Kefeng Wang <wangkefeng.wang@huawei.com>
Subject: [PATCH] mm: add more readable thp_vma_allowable_order_foo()
Date: Wed, 24 Apr 2024 22:07:15 +0800
Message-ID: <20240424140715.5838-1-wangkefeng.wang@huawei.com>
There are too many bool arguments in thp_vma_allowable_orders(), which
makes its call sites hard to read. Add some more readable
thp_vma_allowable_order_foo() wrappers:

  thp_vma_allowable_orders_insmaps()     is used in smaps
  thp_vma_allowable_order[s]_inpf()      is used in the page fault path
  thp_vma_allowable_pmd_order_inhuge()   is used in khugepaged scan and madvise
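For example, the smaps and khugepaged call sites (both conversions
appear in the diff below) become self-describing instead of a string
of opaque booleans:

  Before:

	!!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
				   true, THP_ORDERS_ALL)
	thp_vma_allowable_order(vma, vm_flags, false, false, true,
				PMD_ORDER)

  After:

	thp_vma_allowable_orders_insmaps(vma, vma->vm_flags)
	thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, true)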
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
fs/proc/task_mmu.c | 3 +--
include/linux/huge_mm.h | 14 ++++++++++++--
mm/khugepaged.c | 20 ++++++++------------
mm/memory.c | 8 ++++----
4 files changed, 25 insertions(+), 20 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f4259b7edfde..1136aa97f143 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -871,8 +871,7 @@ static int show_smap(struct seq_file *m, void *v)
__show_smap(m, &mss, false);
seq_printf(m, "THPeligible: %8u\n",
- !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
- true, THP_ORDERS_ALL));
+ thp_vma_allowable_orders_insmaps(vma, vma->vm_flags));
if (arch_pkeys_enabled())
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 56c7ea73090b..345cf394480b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -83,8 +83,18 @@ extern struct kobj_attribute shmem_enabled_attr;
*/
#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
-#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
- (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+#define thp_vma_allowable_orders_insmaps(vma, vm_flags) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, true, false, true, THP_ORDERS_ALL))
+
+#define thp_vma_allowable_orders_inpf(vma, vm_flags, orders) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, false, true, true, orders))
+
+#define thp_vma_allowable_order_inpf(vma, vm_flags, order) \
+ (!!thp_vma_allowable_orders_inpf(vma, vm_flags, BIT(order)))
+
+#define thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, enforce_sysfs) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(PMD_ORDER)))
+
#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 2f73d2aa9ae8..5a27dccfda02 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -453,8 +453,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
hugepage_flags_enabled()) {
- if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
- PMD_ORDER))
+ if (thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, true))
__khugepaged_enter(vma->vm_mm);
}
}
@@ -909,15 +908,15 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
return SCAN_ADDRESS_RANGE;
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
- cc->is_khugepaged, PMD_ORDER))
+ if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags,
+ cc->is_khugepaged))
return SCAN_VMA_CHECK;
/*
* Anon VMA expected, the address may be unmapped then
* remapped to file after khugepaged reaquired the mmap_lock.
*
- * thp_vma_allowable_order may return true for qualified file
- * vmas.
+ * thp_vma_allowable_pmd_order_inhuge may return true for
+ * qualified file vmas.
*/
if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
return SCAN_PAGE_ANON;
@@ -1493,8 +1492,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
* and map it by a PMD, regardless of sysfs THP settings. As such, let's
* analogously elide sysfs THP settings here.
*/
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
- PMD_ORDER))
+ if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
return SCAN_VMA_CHECK;
/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -2355,8 +2353,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
progress++;
break;
}
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
- true, PMD_ORDER)) {
+ if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, true)) {
skip:
progress++;
continue;
@@ -2693,8 +2690,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
*prev = vma;
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
- PMD_ORDER))
+ if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
return -EINVAL;
cc = kmalloc(sizeof(*cc), GFP_KERNEL);
diff --git a/mm/memory.c b/mm/memory.c
index 09ed76e5b8c0..8507bfda461a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4329,8 +4329,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
* for this vma. Then filter out the orders that can't be allocated over
* the faulting address and still be fully contained in the vma.
*/
- orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
- BIT(PMD_ORDER) - 1);
+ orders = thp_vma_allowable_orders_inpf(vma, vma->vm_flags,
+ BIT(PMD_ORDER) - 1);
orders = thp_vma_suitable_orders(vma, vmf->address, orders);
if (!orders)
@@ -5433,7 +5433,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
return VM_FAULT_OOM;
retry_pud:
if (pud_none(*vmf.pud) &&
- thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
+ thp_vma_allowable_order_inpf(vma, vm_flags, PUD_ORDER)) {
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -5467,7 +5467,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
goto retry_pud;
if (pmd_none(*vmf.pmd) &&
- thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
+ thp_vma_allowable_order_inpf(vma, vm_flags, PMD_ORDER)) {
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
--
2.41.0