From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
To: Nico Pache <npache@redhat.com>
Cc: linux-mm@kvack.org, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-trace-kernel@vger.kernel.org,
david@redhat.com, ziy@nvidia.com, baolin.wang@linux.alibaba.com,
lorenzo.stoakes@oracle.com, ryan.roberts@arm.com,
dev.jain@arm.com, corbet@lwn.net, rostedt@goodmis.org,
mhiramat@kernel.org, mathieu.desnoyers@efficios.com,
akpm@linux-foundation.org, baohua@kernel.org,
willy@infradead.org, peterx@redhat.com,
wangkefeng.wang@huawei.com, usamaarif642@gmail.com,
sunnanyong@huawei.com, vishal.moola@gmail.com,
thomas.hellstrom@linux.intel.com, yang@os.amperecomputing.com,
kirill.shutemov@linux.intel.com, aarcange@redhat.com,
raquini@redhat.com, anshuman.khandual@arm.com,
catalin.marinas@arm.com, tiwai@suse.de, will@kernel.org,
dave.hansen@linux.intel.com, jack@suse.cz, cl@gentwo.org,
jglisse@google.com, surenb@google.com, zokeefe@google.com,
hannes@cmpxchg.org, rientjes@google.com, mhocko@suse.com,
rdunlap@infradead.org
Subject: Re: [PATCH v7 01/12] khugepaged: rename hpage_collapse_* to khugepaged_*
Date: Fri, 16 May 2025 13:30:10 -0400 [thread overview]
Message-ID: <gsk47hv4pkqjstkb6mfiv2muon6yj3vp5rsho6rufq5qjfweow@scdzrvph5ah6> (raw)
In-Reply-To: <20250515032226.128900-2-npache@redhat.com>
* Nico Pache <npache@redhat.com> [250514 23:23]:
> functions in khugepaged.c use a mix of hpage_collapse and khugepaged
> as the function prefix.
>
> rename all of them to khugepaged to keep things consistent and slightly
> shorten the function names.
I don't like what was done here; we've lost the context of what these
functions are used for (collapse). Are they used for anything other
than collapse?
I'd rather drop the prefix entirely than drop collapse from them all.
They are all static, so do we really need khugepaged_ at the start of
every static function in khugepaged.c?
>
> Reviewed-by: Zi Yan <ziy@nvidia.com>
> Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> Signed-off-by: Nico Pache <npache@redhat.com>
> ---
> mm/khugepaged.c | 42 +++++++++++++++++++++---------------------
> 1 file changed, 21 insertions(+), 21 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index cdf5a581368b..806bcd8c5185 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -402,14 +402,14 @@ void __init khugepaged_destroy(void)
> kmem_cache_destroy(mm_slot_cache);
> }
>
> -static inline int hpage_collapse_test_exit(struct mm_struct *mm)
> +static inline int khugepaged_test_exit(struct mm_struct *mm)
> {
> return atomic_read(&mm->mm_users) == 0;
> }
>
> -static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
> +static inline int khugepaged_test_exit_or_disable(struct mm_struct *mm)
> {
> - return hpage_collapse_test_exit(mm) ||
> + return khugepaged_test_exit(mm) ||
> test_bit(MMF_DISABLE_THP, &mm->flags);
> }
>
> @@ -444,7 +444,7 @@ void __khugepaged_enter(struct mm_struct *mm)
> int wakeup;
>
> /* __khugepaged_exit() must not run from under us */
> - VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
> + VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
> if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
> return;
>
> @@ -503,7 +503,7 @@ void __khugepaged_exit(struct mm_struct *mm)
> } else if (mm_slot) {
> /*
> * This is required to serialize against
> - * hpage_collapse_test_exit() (which is guaranteed to run
> + * khugepaged_test_exit() (which is guaranteed to run
> * under mmap sem read mode). Stop here (after we return all
> * pagetables will be destroyed) until khugepaged has finished
> * working on the pagetables under the mmap_lock.
> @@ -851,7 +851,7 @@ struct collapse_control khugepaged_collapse_control = {
> .is_khugepaged = true,
> };
>
> -static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
> +static bool khugepaged_scan_abort(int nid, struct collapse_control *cc)
> {
> int i;
>
> @@ -886,7 +886,7 @@ static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
> }
>
> #ifdef CONFIG_NUMA
> -static int hpage_collapse_find_target_node(struct collapse_control *cc)
> +static int khugepaged_find_target_node(struct collapse_control *cc)
> {
> int nid, target_node = 0, max_value = 0;
>
> @@ -905,7 +905,7 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
> return target_node;
> }
> #else
> -static int hpage_collapse_find_target_node(struct collapse_control *cc)
> +static int khugepaged_find_target_node(struct collapse_control *cc)
> {
> return 0;
> }
> @@ -925,7 +925,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
> struct vm_area_struct *vma;
> unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
>
> - if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
> + if (unlikely(khugepaged_test_exit_or_disable(mm)))
> return SCAN_ANY_PROCESS;
>
> *vmap = vma = find_vma(mm, address);
> @@ -992,7 +992,7 @@ static int check_pmd_still_valid(struct mm_struct *mm,
>
> /*
> * Bring missing pages in from swap, to complete THP collapse.
> - * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
> + * Only done if khugepaged_scan_pmd believes it is worthwhile.
> *
> * Called and returns without pte mapped or spinlocks held.
> * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
> @@ -1078,7 +1078,7 @@ static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
> {
> gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
> GFP_TRANSHUGE);
> - int node = hpage_collapse_find_target_node(cc);
> + int node = khugepaged_find_target_node(cc);
> struct folio *folio;
>
> folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
> @@ -1264,7 +1264,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
> return result;
> }
>
> -static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> +static int khugepaged_scan_pmd(struct mm_struct *mm,
> struct vm_area_struct *vma,
> unsigned long address, bool *mmap_locked,
> struct collapse_control *cc)
> @@ -1378,7 +1378,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> * hit record.
> */
> node = folio_nid(folio);
> - if (hpage_collapse_scan_abort(node, cc)) {
> + if (khugepaged_scan_abort(node, cc)) {
> result = SCAN_SCAN_ABORT;
> goto out_unmap;
> }
> @@ -1447,7 +1447,7 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
>
> lockdep_assert_held(&khugepaged_mm_lock);
>
> - if (hpage_collapse_test_exit(mm)) {
> + if (khugepaged_test_exit(mm)) {
> /* free mm_slot */
> hash_del(&slot->hash);
> list_del(&slot->mm_node);
> @@ -1740,7 +1740,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
> if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
> continue;
>
> - if (hpage_collapse_test_exit(mm))
> + if (khugepaged_test_exit(mm))
> continue;
> /*
> * When a vma is registered with uffd-wp, we cannot recycle
> @@ -2262,7 +2262,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
> return result;
> }
>
> -static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
> +static int khugepaged_scan_file(struct mm_struct *mm, unsigned long addr,
> struct file *file, pgoff_t start,
> struct collapse_control *cc)
> {
> @@ -2307,7 +2307,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
> }
>
> node = folio_nid(folio);
> - if (hpage_collapse_scan_abort(node, cc)) {
> + if (khugepaged_scan_abort(node, cc)) {
> result = SCAN_SCAN_ABORT;
> break;
> }
> @@ -2391,7 +2391,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> goto breakouterloop_mmap_lock;
>
> progress++;
> - if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
> + if (unlikely(khugepaged_test_exit_or_disable(mm)))
> goto breakouterloop;
>
> vma_iter_init(&vmi, mm, khugepaged_scan.address);
> @@ -2399,7 +2399,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> unsigned long hstart, hend;
>
> cond_resched();
> - if (unlikely(hpage_collapse_test_exit_or_disable(mm))) {
> + if (unlikely(khugepaged_test_exit_or_disable(mm))) {
> progress++;
> break;
> }
> @@ -2421,7 +2421,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> bool mmap_locked = true;
>
> cond_resched();
> - if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
> + if (unlikely(khugepaged_test_exit_or_disable(mm)))
> goto breakouterloop;
>
> VM_BUG_ON(khugepaged_scan.address < hstart ||
> @@ -2481,7 +2481,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> * Release the current mm_slot if this mm is about to die, or
> * if we scanned all vmas of this mm.
> */
> - if (hpage_collapse_test_exit(mm) || !vma) {
> + if (khugepaged_test_exit(mm) || !vma) {
> /*
> * Make sure that if mm_users is reaching zero while
> * khugepaged runs here, khugepaged_exit will find
> --
> 2.49.0
>
next prev parent reply other threads:[~2025-05-16 17:35 UTC|newest]
Thread overview: 58+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-05-15 3:22 [PATCH v7 00/12] khugepaged: mTHP support Nico Pache
2025-05-15 3:22 ` [PATCH v7 01/12] khugepaged: rename hpage_collapse_* to khugepaged_* Nico Pache
2025-05-16 17:30 ` Liam R. Howlett [this message]
2025-06-29 6:48 ` Nico Pache
2025-05-15 3:22 ` [PATCH v7 02/12] introduce khugepaged_collapse_single_pmd to unify khugepaged and madvise_collapse Nico Pache
2025-05-15 5:50 ` Baolin Wang
2025-05-16 11:59 ` Nico Pache
2025-05-16 17:12 ` Liam R. Howlett
2025-07-02 0:00 ` Nico Pache
2025-05-15 3:22 ` [PATCH v7 03/12] khugepaged: generalize hugepage_vma_revalidate for mTHP support Nico Pache
2025-05-16 17:14 ` Liam R. Howlett
2025-06-29 6:52 ` Nico Pache
2025-05-23 6:55 ` Baolin Wang
2025-05-28 6:57 ` Dev Jain
2025-05-29 4:00 ` Nico Pache
2025-05-30 3:02 ` Baolin Wang
2025-05-15 3:22 ` [PATCH v7 04/12] khugepaged: generalize alloc_charge_folio() Nico Pache
2025-05-15 3:22 ` [PATCH v7 05/12] khugepaged: generalize __collapse_huge_page_* for mTHP support Nico Pache
2025-05-15 3:22 ` [PATCH v7 06/12] khugepaged: introduce khugepaged_scan_bitmap " Nico Pache
2025-05-16 3:20 ` Baolin Wang
2025-05-17 6:47 ` Nico Pache
2025-05-18 3:04 ` Liam R. Howlett
2025-05-20 10:09 ` Baolin Wang
2025-05-20 10:26 ` David Hildenbrand
2025-05-21 1:03 ` Baolin Wang
2025-05-21 10:23 ` Nico Pache
2025-05-22 9:39 ` Baolin Wang
2025-05-28 9:26 ` David Hildenbrand
2025-05-28 14:04 ` Baolin Wang
2025-05-29 4:02 ` Nico Pache
2025-05-29 8:27 ` Baolin Wang
2025-05-15 3:22 ` [PATCH v7 07/12] khugepaged: add " Nico Pache
2025-06-07 6:23 ` Dev Jain
2025-06-07 12:55 ` Nico Pache
2025-06-07 13:03 ` Nico Pache
2025-06-07 14:31 ` Dev Jain
2025-06-07 14:42 ` Dev Jain
2025-05-15 3:22 ` [PATCH v7 08/12] khugepaged: skip collapsing mTHP to smaller orders Nico Pache
2025-05-15 3:22 ` [PATCH v7 09/12] khugepaged: avoid unnecessary mTHP collapse attempts Nico Pache
2025-05-15 3:22 ` [PATCH v7 10/12] khugepaged: improve tracepoints for mTHP orders Nico Pache
2025-05-15 3:22 ` [PATCH v7 11/12] khugepaged: add per-order mTHP khugepaged stats Nico Pache
2025-05-15 3:22 ` [PATCH v7 12/12] Documentation: mm: update the admin guide for mTHP collapse Nico Pache
2025-05-15 4:40 ` Randy Dunlap
2025-06-07 6:44 ` Dev Jain
2025-06-07 12:57 ` Nico Pache
2025-06-07 14:34 ` Dev Jain
2025-06-08 19:50 ` Nico Pache
2025-06-09 3:06 ` Baolin Wang
2025-06-09 5:26 ` Dev Jain
2025-06-09 6:39 ` Baolin Wang
2025-06-09 5:56 ` Nico Pache
2025-05-28 12:31 ` [PATCH 1/2] mm: khugepaged: allow khugepaged to check all anonymous mTHP orders Baolin Wang
2025-05-28 12:31 ` [PATCH 2/2] mm: khugepaged: kick khugepaged for enabling none-PMD-sized mTHPs Baolin Wang
2025-05-28 12:39 ` [PATCH v7 00/12] khugepaged: mTHP support Baolin Wang
2025-05-29 3:52 ` Nico Pache
2025-06-16 3:51 ` Dev Jain
2025-06-16 15:51 ` Nico Pache
2025-06-16 16:35 ` Dev Jain
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=gsk47hv4pkqjstkb6mfiv2muon6yj3vp5rsho6rufq5qjfweow@scdzrvph5ah6 \
--to=liam.howlett@oracle.com \
--cc=aarcange@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=anshuman.khandual@arm.com \
--cc=baohua@kernel.org \
--cc=baolin.wang@linux.alibaba.com \
--cc=catalin.marinas@arm.com \
--cc=cl@gentwo.org \
--cc=corbet@lwn.net \
--cc=dave.hansen@linux.intel.com \
--cc=david@redhat.com \
--cc=dev.jain@arm.com \
--cc=hannes@cmpxchg.org \
--cc=jack@suse.cz \
--cc=jglisse@google.com \
--cc=kirill.shutemov@linux.intel.com \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-trace-kernel@vger.kernel.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=mathieu.desnoyers@efficios.com \
--cc=mhiramat@kernel.org \
--cc=mhocko@suse.com \
--cc=npache@redhat.com \
--cc=peterx@redhat.com \
--cc=raquini@redhat.com \
--cc=rdunlap@infradead.org \
--cc=rientjes@google.com \
--cc=rostedt@goodmis.org \
--cc=ryan.roberts@arm.com \
--cc=sunnanyong@huawei.com \
--cc=surenb@google.com \
--cc=thomas.hellstrom@linux.intel.com \
--cc=tiwai@suse.de \
--cc=usamaarif642@gmail.com \
--cc=vishal.moola@gmail.com \
--cc=wangkefeng.wang@huawei.com \
--cc=will@kernel.org \
--cc=willy@infradead.org \
--cc=yang@os.amperecomputing.com \
--cc=ziy@nvidia.com \
--cc=zokeefe@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox