* [PATCH] mm: add more readable thp_vma_allowable_order_foo()
@ 2024-04-24 14:07 Kefeng Wang
2024-04-24 14:05 ` Ryan Roberts
2024-04-24 14:57 ` David Hildenbrand
0 siblings, 2 replies; 6+ messages in thread
From: Kefeng Wang @ 2024-04-24 14:07 UTC
To: Andrew Morton; +Cc: Ryan Roberts, linux-mm, David Hildenbrand, Kefeng Wang
There are too many bool arguments in thp_vma_allowable_orders(), so add
some more readable wrappers, thp_vma_allowable_order_foo():

thp_vma_allowable_orders_insmaps() is used in smaps
thp_vma_allowable_order[s]_inpf() are used in the page fault path
thp_vma_allowable_pmd_order_inhuge() is used in the khugepaged scan and
madvise paths
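
For example, the khugepaged_enter_vma() check changes from the
hard-to-parse

	thp_vma_allowable_order(vma, vm_flags, false, false, true, PMD_ORDER)

to the self-describing

	thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, true)
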
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
fs/proc/task_mmu.c | 3 +--
include/linux/huge_mm.h | 14 ++++++++++++--
mm/khugepaged.c | 20 ++++++++------------
mm/memory.c | 8 ++++----
4 files changed, 25 insertions(+), 20 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f4259b7edfde..1136aa97f143 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -871,8 +871,7 @@ static int show_smap(struct seq_file *m, void *v)
__show_smap(m, &mss, false);
seq_printf(m, "THPeligible: %8u\n",
- !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
- true, THP_ORDERS_ALL));
+ thp_vma_allowable_orders_insmaps(vma, vma->vm_flags));
if (arch_pkeys_enabled())
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 56c7ea73090b..345cf394480b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -83,8 +83,18 @@ extern struct kobj_attribute shmem_enabled_attr;
*/
#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
-#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
- (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+#define thp_vma_allowable_orders_insmaps(vma, vm_flags) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, true, false, true, THP_ORDERS_ALL))
+
+#define thp_vma_allowable_orders_inpf(vma, vm_flags, orders) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, false, true, true, orders))
+
+#define thp_vma_allowable_order_inpf(vma, vm_flags, order) \
+ (!!thp_vma_allowable_orders_inpf(vma, vm_flags, BIT(order)))
+
+#define thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, enforce_sysfs) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(PMD_ORDER)))
+
#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 2f73d2aa9ae8..5a27dccfda02 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -453,8 +453,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
hugepage_flags_enabled()) {
- if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
- PMD_ORDER))
+ if (thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, true))
__khugepaged_enter(vma->vm_mm);
}
}
@@ -909,15 +908,15 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
return SCAN_ADDRESS_RANGE;
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
- cc->is_khugepaged, PMD_ORDER))
+ if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags,
+ cc->is_khugepaged))
return SCAN_VMA_CHECK;
/*
* Anon VMA expected, the address may be unmapped then
* remapped to file after khugepaged reaquired the mmap_lock.
*
- * thp_vma_allowable_order may return true for qualified file
- * vmas.
+ * thp_vma_allowable_pmd_order_inhuge may return true for
+ * qualified file vmas.
*/
if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
return SCAN_PAGE_ANON;
@@ -1493,8 +1492,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
* and map it by a PMD, regardless of sysfs THP settings. As such, let's
* analogously elide sysfs THP settings here.
*/
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
- PMD_ORDER))
+ if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
return SCAN_VMA_CHECK;
/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -2355,8 +2353,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
progress++;
break;
}
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
- true, PMD_ORDER)) {
+ if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, true)) {
skip:
progress++;
continue;
@@ -2693,8 +2690,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
*prev = vma;
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
- PMD_ORDER))
+ if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
return -EINVAL;
cc = kmalloc(sizeof(*cc), GFP_KERNEL);
diff --git a/mm/memory.c b/mm/memory.c
index 09ed76e5b8c0..8507bfda461a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4329,8 +4329,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
* for this vma. Then filter out the orders that can't be allocated over
* the faulting address and still be fully contained in the vma.
*/
- orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
- BIT(PMD_ORDER) - 1);
+ orders = thp_vma_allowable_orders_inpf(vma, vma->vm_flags,
+ BIT(PMD_ORDER) - 1);
orders = thp_vma_suitable_orders(vma, vmf->address, orders);
if (!orders)
@@ -5433,7 +5433,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
return VM_FAULT_OOM;
retry_pud:
if (pud_none(*vmf.pud) &&
- thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
+ thp_vma_allowable_order_inpf(vma, vm_flags, PUD_ORDER)) {
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -5467,7 +5467,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
goto retry_pud;
if (pmd_none(*vmf.pmd) &&
- thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
+ thp_vma_allowable_order_inpf(vma, vm_flags, PMD_ORDER)) {
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
--
2.41.0
* Re: [PATCH] mm: add more readable thp_vma_allowable_order_foo()
2024-04-24 14:07 [PATCH] mm: add more readable thp_vma_allowable_order_foo() Kefeng Wang
@ 2024-04-24 14:05 ` Ryan Roberts
2024-04-24 14:11 ` Kefeng Wang
2024-04-24 14:58 ` David Hildenbrand
2024-04-24 14:57 ` David Hildenbrand
1 sibling, 2 replies; 6+ messages in thread
From: Ryan Roberts @ 2024-04-24 14:05 UTC
To: Kefeng Wang, Andrew Morton; +Cc: linux-mm, David Hildenbrand
On 24/04/2024 15:07, Kefeng Wang wrote:
> There are too many bool arguments in thp_vma_allowable_orders(), so add
> some more readable wrappers, thp_vma_allowable_order_foo():
>
> thp_vma_allowable_orders_insmaps() is used in smaps
> thp_vma_allowable_order[s]_inpf() are used in the page fault path
> thp_vma_allowable_pmd_order_inhuge() is used in the khugepaged scan and
> madvise paths
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Just one nit below. With that addressed:
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
> fs/proc/task_mmu.c | 3 +--
> include/linux/huge_mm.h | 14 ++++++++++++--
> mm/khugepaged.c | 20 ++++++++------------
> mm/memory.c | 8 ++++----
> 4 files changed, 25 insertions(+), 20 deletions(-)
>
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index f4259b7edfde..1136aa97f143 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -871,8 +871,7 @@ static int show_smap(struct seq_file *m, void *v)
> __show_smap(m, &mss, false);
>
> seq_printf(m, "THPeligible: %8u\n",
> - !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
> - true, THP_ORDERS_ALL));
> + thp_vma_allowable_orders_insmaps(vma, vma->vm_flags));
>
> if (arch_pkeys_enabled())
> seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 56c7ea73090b..345cf394480b 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -83,8 +83,18 @@ extern struct kobj_attribute shmem_enabled_attr;
> */
> #define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
>
> -#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
> - (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
> +#define thp_vma_allowable_orders_insmaps(vma, vm_flags) \
> + (!!thp_vma_allowable_orders(vma, vm_flags, true, false, true, THP_ORDERS_ALL))
> +
> +#define thp_vma_allowable_orders_inpf(vma, vm_flags, orders) \
> + (!!thp_vma_allowable_orders(vma, vm_flags, false, true, true, orders))
> +
> +#define thp_vma_allowable_order_inpf(vma, vm_flags, order) \
> + (!!thp_vma_allowable_orders_inpf(vma, vm_flags, BIT(order)))
> +
> +#define thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, enforce_sysfs) \
> + (!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(PMD_ORDER)))
nit: Personally I'd leave the order as an argument rather than encoding it in
the name. It's likely that khugepaged will grow support for non-PMD-size
collapse in future. The first part of the name "thp_vma_allowable_order" is then
consistent and easy to search for all variants. And perhaps "inkhuge" is more
precise?
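Something like this, perhaps (names illustrative only):

	#define thp_vma_allowable_order_inkhuge(vma, vm_flags, enforce_sysfs, order) \
		(!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(order)))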
> +
>
> #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
> #define HPAGE_PMD_SHIFT PMD_SHIFT
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 2f73d2aa9ae8..5a27dccfda02 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -453,8 +453,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
> {
> if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
> hugepage_flags_enabled()) {
> - if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
> - PMD_ORDER))
> + if (thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, true))
> __khugepaged_enter(vma->vm_mm);
> }
> }
> @@ -909,15 +908,15 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
>
> if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
> return SCAN_ADDRESS_RANGE;
> - if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
> - cc->is_khugepaged, PMD_ORDER))
> + if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags,
> + cc->is_khugepaged))
> return SCAN_VMA_CHECK;
> /*
> * Anon VMA expected, the address may be unmapped then
> * remapped to file after khugepaged reaquired the mmap_lock.
> *
> - * thp_vma_allowable_order may return true for qualified file
> - * vmas.
> + * thp_vma_allowable_pmd_order_inhuge may return true for
> + * qualified file vmas.
> */
> if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
> return SCAN_PAGE_ANON;
> @@ -1493,8 +1492,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
> * and map it by a PMD, regardless of sysfs THP settings. As such, let's
> * analogously elide sysfs THP settings here.
> */
> - if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
> - PMD_ORDER))
> + if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
> return SCAN_VMA_CHECK;
>
> /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
> @@ -2355,8 +2353,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> progress++;
> break;
> }
> - if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
> - true, PMD_ORDER)) {
> + if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, true)) {
> skip:
> progress++;
> continue;
> @@ -2693,8 +2690,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
>
> *prev = vma;
>
> - if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
> - PMD_ORDER))
> + if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
> return -EINVAL;
>
> cc = kmalloc(sizeof(*cc), GFP_KERNEL);
> diff --git a/mm/memory.c b/mm/memory.c
> index 09ed76e5b8c0..8507bfda461a 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4329,8 +4329,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
> * for this vma. Then filter out the orders that can't be allocated over
> * the faulting address and still be fully contained in the vma.
> */
> - orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
> - BIT(PMD_ORDER) - 1);
> + orders = thp_vma_allowable_orders_inpf(vma, vma->vm_flags,
> + BIT(PMD_ORDER) - 1);
> orders = thp_vma_suitable_orders(vma, vmf->address, orders);
>
> if (!orders)
> @@ -5433,7 +5433,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
> return VM_FAULT_OOM;
> retry_pud:
> if (pud_none(*vmf.pud) &&
> - thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
> + thp_vma_allowable_order_inpf(vma, vm_flags, PUD_ORDER)) {
> ret = create_huge_pud(&vmf);
> if (!(ret & VM_FAULT_FALLBACK))
> return ret;
> @@ -5467,7 +5467,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
> goto retry_pud;
>
> if (pmd_none(*vmf.pmd) &&
> - thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
> + thp_vma_allowable_order_inpf(vma, vm_flags, PMD_ORDER)) {
> ret = create_huge_pmd(&vmf);
> if (!(ret & VM_FAULT_FALLBACK))
> return ret;
* Re: [PATCH] mm: add more readable thp_vma_allowable_order_foo()
2024-04-24 14:05 ` Ryan Roberts
@ 2024-04-24 14:11 ` Kefeng Wang
2024-04-24 14:58 ` David Hildenbrand
1 sibling, 0 replies; 6+ messages in thread
From: Kefeng Wang @ 2024-04-24 14:11 UTC
To: Ryan Roberts, Andrew Morton; +Cc: linux-mm, David Hildenbrand
On 2024/4/24 22:05, Ryan Roberts wrote:
> On 24/04/2024 15:07, Kefeng Wang wrote:
>> There are too many bool arguments in thp_vma_allowable_orders(), so add
>> some more readable wrappers, thp_vma_allowable_order_foo():
>>
>> thp_vma_allowable_orders_insmaps() is used in smaps
>> thp_vma_allowable_order[s]_inpf() are used in the page fault path
>> thp_vma_allowable_pmd_order_inhuge() is used in the khugepaged scan and
>> madvise paths
>>
>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>
> Just one nit below. With that addressed:
>
> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
>
>> ---
>> fs/proc/task_mmu.c | 3 +--
>> include/linux/huge_mm.h | 14 ++++++++++++--
>> mm/khugepaged.c | 20 ++++++++------------
>> mm/memory.c | 8 ++++----
>> 4 files changed, 25 insertions(+), 20 deletions(-)
>>
>> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
>> index f4259b7edfde..1136aa97f143 100644
>> --- a/fs/proc/task_mmu.c
>> +++ b/fs/proc/task_mmu.c
>> @@ -871,8 +871,7 @@ static int show_smap(struct seq_file *m, void *v)
>> __show_smap(m, &mss, false);
>>
>> seq_printf(m, "THPeligible: %8u\n",
>> - !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
>> - true, THP_ORDERS_ALL));
>> + thp_vma_allowable_orders_insmaps(vma, vma->vm_flags));
>>
>> if (arch_pkeys_enabled())
>> seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
>> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
>> index 56c7ea73090b..345cf394480b 100644
>> --- a/include/linux/huge_mm.h
>> +++ b/include/linux/huge_mm.h
>> @@ -83,8 +83,18 @@ extern struct kobj_attribute shmem_enabled_attr;
>> */
>> #define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
>>
>> -#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
>> - (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
>> +#define thp_vma_allowable_orders_insmaps(vma, vm_flags) \
>> + (!!thp_vma_allowable_orders(vma, vm_flags, true, false, true, THP_ORDERS_ALL))
>> +
>> +#define thp_vma_allowable_orders_inpf(vma, vm_flags, orders) \
>> + (!!thp_vma_allowable_orders(vma, vm_flags, false, true, true, orders))
>> +
>> +#define thp_vma_allowable_order_inpf(vma, vm_flags, order) \
>> + (!!thp_vma_allowable_orders_inpf(vma, vm_flags, BIT(order)))
>> +
>> +#define thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, enforce_sysfs) \
>> + (!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(PMD_ORDER)))
>
> nit: Personally I'd leave the order as an argument rather than encoding it in
> the name. It's likely that khugepaged will grow support for non-PMD-size
> collapse in future. The first part of the name "thp_vma_allowable_order" is then
> consistent and easy to search for all variants. And perhaps "inkhuge" is more
> precise?
Sure, I'll switch to thp_vma_allowable_order_inkhuge(vma, vm_flags,
enforce_sysfs, order); we could add thp_vma_allowable_orders_inkhuge(),
like the _inpf variants, in the future.
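
A call site would then look like, e.g. in khugepaged_enter_vma():

	if (thp_vma_allowable_order_inkhuge(vma, vm_flags, true, PMD_ORDER))
		__khugepaged_enter(vma->vm_mm);
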
Thanks.
>
>> +
>>
>> #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
>> #define HPAGE_PMD_SHIFT PMD_SHIFT
>> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
>> index 2f73d2aa9ae8..5a27dccfda02 100644
>> --- a/mm/khugepaged.c
>> +++ b/mm/khugepaged.c
>> @@ -453,8 +453,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
>> {
>> if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
>> hugepage_flags_enabled()) {
>> - if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
>> - PMD_ORDER))
>> + if (thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, true))
>> __khugepaged_enter(vma->vm_mm);
>> }
>> }
>> @@ -909,15 +908,15 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
>>
>> if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
>> return SCAN_ADDRESS_RANGE;
>> - if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
>> - cc->is_khugepaged, PMD_ORDER))
>> + if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags,
>> + cc->is_khugepaged))
>> return SCAN_VMA_CHECK;
>> /*
>> * Anon VMA expected, the address may be unmapped then
>> * remapped to file after khugepaged reaquired the mmap_lock.
>> *
>> - * thp_vma_allowable_order may return true for qualified file
>> - * vmas.
>> + * thp_vma_allowable_pmd_order_inhuge may return true for
>> + * qualified file vmas.
>> */
>> if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
>> return SCAN_PAGE_ANON;
>> @@ -1493,8 +1492,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
>> * and map it by a PMD, regardless of sysfs THP settings. As such, let's
>> * analogously elide sysfs THP settings here.
>> */
>> - if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
>> - PMD_ORDER))
>> + if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
>> return SCAN_VMA_CHECK;
>>
>> /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
>> @@ -2355,8 +2353,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>> progress++;
>> break;
>> }
>> - if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
>> - true, PMD_ORDER)) {
>> + if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, true)) {
>> skip:
>> progress++;
>> continue;
>> @@ -2693,8 +2690,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
>>
>> *prev = vma;
>>
>> - if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
>> - PMD_ORDER))
>> + if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
>> return -EINVAL;
>>
>> cc = kmalloc(sizeof(*cc), GFP_KERNEL);
>> diff --git a/mm/memory.c b/mm/memory.c
>> index 09ed76e5b8c0..8507bfda461a 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -4329,8 +4329,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
>> * for this vma. Then filter out the orders that can't be allocated over
>> * the faulting address and still be fully contained in the vma.
>> */
>> - orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
>> - BIT(PMD_ORDER) - 1);
>> + orders = thp_vma_allowable_orders_inpf(vma, vma->vm_flags,
>> + BIT(PMD_ORDER) - 1);
>> orders = thp_vma_suitable_orders(vma, vmf->address, orders);
>>
>> if (!orders)
>> @@ -5433,7 +5433,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
>> return VM_FAULT_OOM;
>> retry_pud:
>> if (pud_none(*vmf.pud) &&
>> - thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
>> + thp_vma_allowable_order_inpf(vma, vm_flags, PUD_ORDER)) {
>> ret = create_huge_pud(&vmf);
>> if (!(ret & VM_FAULT_FALLBACK))
>> return ret;
>> @@ -5467,7 +5467,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
>> goto retry_pud;
>>
>> if (pmd_none(*vmf.pmd) &&
>> - thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
>> + thp_vma_allowable_order_inpf(vma, vm_flags, PMD_ORDER)) {
>> ret = create_huge_pmd(&vmf);
>> if (!(ret & VM_FAULT_FALLBACK))
>> return ret;
>
* Re: [PATCH] mm: add more readable thp_vma_allowable_order_foo()
2024-04-24 14:05 ` Ryan Roberts
2024-04-24 14:11 ` Kefeng Wang
@ 2024-04-24 14:58 ` David Hildenbrand
2024-04-25 1:09 ` Kefeng Wang
1 sibling, 1 reply; 6+ messages in thread
From: David Hildenbrand @ 2024-04-24 14:58 UTC
To: Ryan Roberts, Kefeng Wang, Andrew Morton; +Cc: linux-mm
On 24.04.24 16:05, Ryan Roberts wrote:
> On 24/04/2024 15:07, Kefeng Wang wrote:
>> There are too many bool arguments in thp_vma_allowable_orders(), so add
>> some more readable wrappers, thp_vma_allowable_order_foo():
>>
>> thp_vma_allowable_orders_insmaps() is used in smaps
>> thp_vma_allowable_order[s]_inpf() are used in the page fault path
>> thp_vma_allowable_pmd_order_inhuge() is used in the khugepaged scan and
>> madvise paths
>>
>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>
> Just one nit below. With that addressed:
>
> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
>
>> ---
>> fs/proc/task_mmu.c | 3 +--
>> include/linux/huge_mm.h | 14 ++++++++++++--
>> mm/khugepaged.c | 20 ++++++++------------
>> mm/memory.c | 8 ++++----
>> 4 files changed, 25 insertions(+), 20 deletions(-)
>>
>> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
>> index f4259b7edfde..1136aa97f143 100644
>> --- a/fs/proc/task_mmu.c
>> +++ b/fs/proc/task_mmu.c
>> @@ -871,8 +871,7 @@ static int show_smap(struct seq_file *m, void *v)
>> __show_smap(m, &mss, false);
>>
>> seq_printf(m, "THPeligible: %8u\n",
>> - !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
>> - true, THP_ORDERS_ALL));
>> + thp_vma_allowable_orders_insmaps(vma, vma->vm_flags));
>>
>> if (arch_pkeys_enabled())
>> seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
>> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
>> index 56c7ea73090b..345cf394480b 100644
>> --- a/include/linux/huge_mm.h
>> +++ b/include/linux/huge_mm.h
>> @@ -83,8 +83,18 @@ extern struct kobj_attribute shmem_enabled_attr;
>> */
>> #define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
>>
>> -#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
>> - (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
>> +#define thp_vma_allowable_orders_insmaps(vma, vm_flags) \
>> + (!!thp_vma_allowable_orders(vma, vm_flags, true, false, true, THP_ORDERS_ALL))
>> +
>> +#define thp_vma_allowable_orders_inpf(vma, vm_flags, orders) \
>> + (!!thp_vma_allowable_orders(vma, vm_flags, false, true, true, orders))
>> +
>> +#define thp_vma_allowable_order_inpf(vma, vm_flags, order) \
>> + (!!thp_vma_allowable_orders_inpf(vma, vm_flags, BIT(order)))
>> +
>> +#define thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, enforce_sysfs) \
>> + (!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(PMD_ORDER)))
>
> nit: Personally I'd leave the order as an argument rather than encoding it in
> the name. It's likely that khugepaged will grow support for non-PMD-size
Agreed.
> collapse in future. The first part of the name "thp_vma_allowable_order" is then
> consistent and easy to search for all variants. And perhaps "inkhuge" is more
> precise?
"_khugepaged" or something else that people can actually parse and
understand.
--
Cheers,
David / dhildenb
* Re: [PATCH] mm: add more readable thp_vma_allowable_order_foo()
2024-04-24 14:58 ` David Hildenbrand
@ 2024-04-25 1:09 ` Kefeng Wang
0 siblings, 0 replies; 6+ messages in thread
From: Kefeng Wang @ 2024-04-25 1:09 UTC
To: David Hildenbrand, Ryan Roberts, Andrew Morton; +Cc: linux-mm
On 2024/4/24 22:58, David Hildenbrand wrote:
> On 24.04.24 16:05, Ryan Roberts wrote:
>> On 24/04/2024 15:07, Kefeng Wang wrote:
>>> There are too many bool arguments in thp_vma_allowable_orders(), so add
>>> some more readable wrappers, thp_vma_allowable_order_foo():
>>>
>>> thp_vma_allowable_orders_insmaps() is used in smaps
>>> thp_vma_allowable_order[s]_inpf() are used in the page fault path
>>> thp_vma_allowable_pmd_order_inhuge() is used in the khugepaged scan and
>>> madvise paths
>>>
>>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>>
>> Just one nit below. With that addressed:
>>
>> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
>>
...
>>
>> nit: Personally I'd leave the order as an argument rather than
>> encoding it in
>> the name. It's likely that khugepaged will grow support for non-PMD-size
>
> Agreed.
>
>> collapse in future. The first part of the name
>> "thp_vma_allowable_order" is then
>> consistent and easy to search for all variants. And perhaps "inkhuge"
>> is more
>> precise?
>
> "_khugepaged" or something else that people can actually parse and
> understand.
>
I tried that before; _inkhugepaged is a bit long, so I chose inkhuge, but
I can't find a better name. I'll use the names you and Ryan suggested:
thp_vma_allowable_orders_smaps()
thp_vma_allowable_order[s]_pf()
thp_vma_allowable_order_khugepaged()
Thanks.
* Re: [PATCH] mm: add more readable thp_vma_allowable_order_foo()
2024-04-24 14:07 [PATCH] mm: add more readable thp_vma_allowable_order_foo() Kefeng Wang
2024-04-24 14:05 ` Ryan Roberts
@ 2024-04-24 14:57 ` David Hildenbrand
1 sibling, 0 replies; 6+ messages in thread
From: David Hildenbrand @ 2024-04-24 14:57 UTC
To: Kefeng Wang, Andrew Morton; +Cc: Ryan Roberts, linux-mm
On 24.04.24 16:07, Kefeng Wang wrote:
> There are too many bool arguments in thp_vma_allowable_orders(), so add
> some more readable wrappers, thp_vma_allowable_order_foo():
Good, I had something similar in mind when talking about that with Ryan
in the past (during mTHP development).
>
> thp_vma_allowable_orders_insmaps() is used in smaps
> thp_vma_allowable_order[s]_inpf() are used in the page fault path
> thp_vma_allowable_pmd_order_inhuge() is used in the khugepaged scan and
> madvise paths
I really don't like the "_in" stuff. And "inhuge" doesn't add any clarity.
What about
thp_vma_allowable_orders_smaps()
thp_vma_allowable_order[s]_pf()
thp_vma_allowable_pmd_order()
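
i.e., keeping the current semantics, roughly:

	#define thp_vma_allowable_orders_smaps(vma, vm_flags) \
		(!!thp_vma_allowable_orders(vma, vm_flags, true, false, true, THP_ORDERS_ALL))

	#define thp_vma_allowable_order_pf(vma, vm_flags, order) \
		(!!thp_vma_allowable_orders(vma, vm_flags, false, true, true, BIT(order)))

	#define thp_vma_allowable_pmd_order(vma, vm_flags, enforce_sysfs) \
		(!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(PMD_ORDER)))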
--
Cheers,
David / dhildenb