From: Muhammad Usama Anjum <MUsamaAnjum@gmail.com>
To: Wei Yang <richard.weiyang@gmail.com>,
akpm@linux-foundation.org, david@redhat.com,
lorenzo.stoakes@oracle.com, ziy@nvidia.com,
baolin.wang@linux.alibaba.com, Liam.Howlett@oracle.com,
npache@redhat.com, ryan.roberts@arm.com, dev.jain@arm.com,
baohua@kernel.org, lance.yang@linux.dev, xu.xin16@zte.com.cn,
chengming.zhou@linux.dev
Cc: linux-mm@kvack.org, Kiryl Shutsemau <kirill@shutemov.name>,
SeongJae Park <sj@kernel.org>
Subject: Re: [Patch v4 2/2] mm/khugepaged: remove definition of struct khugepaged_mm_slot
Date: Sat, 27 Sep 2025 13:40:21 +0500 [thread overview]
Message-ID: <648726ab-bd57-4ed3-bcaa-0d0372264728@gmail.com> (raw)
In-Reply-To: <20250927004539.19308-3-richard.weiyang@gmail.com>
On 9/27/25 5:45 AM, Wei Yang wrote:
> Current code calls mm_slot_entry() even when we don't have a valid slot,
> which is not future proof. Currently, this is not a problem because
> "slot" is the first member in struct khugepaged_mm_slot.
>
> While struct khugepaged_mm_slot is just a wrapper of struct mm_slot, there
> is no need to define it.
>
> Remove the definition of struct khugepaged_mm_slot, so there is no chance
> to misuse mm_slot_entry().
>
> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
> Acked-by: Lance Yang <lance.yang@linux.dev>
> Reviewed-by: Dev Jain <dev.jain@arm.com>
> Cc: Lance Yang <lance.yang@linux.dev>
> Cc: David Hildenbrand <david@redhat.com>
> Cc: Dev Jain <dev.jain@arm.com>
> Cc: Kiryl Shutsemau <kirill@shutemov.name>
> Cc: xu xin <xu.xin16@zte.com.cn>
> Cc: SeongJae Park <sj@kernel.org>
> Cc: Nico Pache <npache@redhat.com>
Acked-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
>
> ---
> v3:
> * adjust changelog
> * rename the slab cache to "mm_slot"
> v2:
> * fix a PF reported by SeongJae, where slot was changed to the next one
> ---
> mm/khugepaged.c | 55 ++++++++++++++++---------------------------------
> 1 file changed, 18 insertions(+), 37 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 204ce3059267..67540078083b 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -103,14 +103,6 @@ struct collapse_control {
> nodemask_t alloc_nmask;
> };
>
> -/**
> - * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
> - * @slot: hash lookup from mm to mm_slot
> - */
> -struct khugepaged_mm_slot {
> - struct mm_slot slot;
> -};
> -
> /**
> * struct khugepaged_scan - cursor for scanning
> * @mm_head: the head of the mm list to scan
> @@ -121,7 +113,7 @@ struct khugepaged_mm_slot {
> */
> struct khugepaged_scan {
> struct list_head mm_head;
> - struct khugepaged_mm_slot *mm_slot;
> + struct mm_slot *mm_slot;
> unsigned long address;
> };
>
> @@ -384,7 +376,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
>
> int __init khugepaged_init(void)
> {
> - mm_slot_cache = KMEM_CACHE(khugepaged_mm_slot, 0);
> + mm_slot_cache = KMEM_CACHE(mm_slot, 0);
> if (!mm_slot_cache)
> return -ENOMEM;
>
> @@ -438,7 +430,6 @@ static bool hugepage_pmd_enabled(void)
>
> void __khugepaged_enter(struct mm_struct *mm)
> {
> - struct khugepaged_mm_slot *mm_slot;
> struct mm_slot *slot;
> int wakeup;
>
> @@ -447,12 +438,10 @@ void __khugepaged_enter(struct mm_struct *mm)
> if (unlikely(mm_flags_test_and_set(MMF_VM_HUGEPAGE, mm)))
> return;
>
> - mm_slot = mm_slot_alloc(mm_slot_cache);
> - if (!mm_slot)
> + slot = mm_slot_alloc(mm_slot_cache);
> + if (!slot)
> return;
>
> - slot = &mm_slot->slot;
> -
> spin_lock(&khugepaged_mm_lock);
> mm_slot_insert(mm_slots_hash, mm, slot);
> /*
> @@ -480,14 +469,12 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
>
> void __khugepaged_exit(struct mm_struct *mm)
> {
> - struct khugepaged_mm_slot *mm_slot;
> struct mm_slot *slot;
> int free = 0;
>
> spin_lock(&khugepaged_mm_lock);
> slot = mm_slot_lookup(mm_slots_hash, mm);
> - mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
> - if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
> + if (slot && khugepaged_scan.mm_slot != slot) {
> hash_del(&slot->hash);
> list_del(&slot->mm_node);
> free = 1;
> @@ -496,9 +483,9 @@ void __khugepaged_exit(struct mm_struct *mm)
>
> if (free) {
> mm_flags_clear(MMF_VM_HUGEPAGE, mm);
> - mm_slot_free(mm_slot_cache, mm_slot);
> + mm_slot_free(mm_slot_cache, slot);
> mmdrop(mm);
> - } else if (mm_slot) {
> + } else if (slot) {
> /*
> * This is required to serialize against
> * hpage_collapse_test_exit() (which is guaranteed to run
> @@ -1432,9 +1419,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> return result;
> }
>
> -static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
> +static void collect_mm_slot(struct mm_slot *slot)
> {
> - struct mm_slot *slot = &mm_slot->slot;
> struct mm_struct *mm = slot->mm;
>
> lockdep_assert_held(&khugepaged_mm_lock);
> @@ -1451,7 +1437,7 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
> */
>
> /* khugepaged_mm_lock actually not necessary for the below */
> - mm_slot_free(mm_slot_cache, mm_slot);
> + mm_slot_free(mm_slot_cache, slot);
> mmdrop(mm);
> }
> }
> @@ -2394,7 +2380,6 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> __acquires(&khugepaged_mm_lock)
> {
> struct vma_iterator vmi;
> - struct khugepaged_mm_slot *mm_slot;
> struct mm_slot *slot;
> struct mm_struct *mm;
> struct vm_area_struct *vma;
> @@ -2405,14 +2390,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> *result = SCAN_FAIL;
>
> if (khugepaged_scan.mm_slot) {
> - mm_slot = khugepaged_scan.mm_slot;
> - slot = &mm_slot->slot;
> + slot = khugepaged_scan.mm_slot;
> } else {
> slot = list_first_entry(&khugepaged_scan.mm_head,
> struct mm_slot, mm_node);
> - mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
> khugepaged_scan.address = 0;
> - khugepaged_scan.mm_slot = mm_slot;
> + khugepaged_scan.mm_slot = slot;
> }
> spin_unlock(&khugepaged_mm_lock);
>
> @@ -2510,7 +2493,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> breakouterloop_mmap_lock:
>
> spin_lock(&khugepaged_mm_lock);
> - VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
> + VM_BUG_ON(khugepaged_scan.mm_slot != slot);
> /*
> * Release the current mm_slot if this mm is about to die, or
> * if we scanned all vmas of this mm.
> @@ -2522,16 +2505,14 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> * mm_slot not pointing to the exiting mm.
> */
> if (!list_is_last(&slot->mm_node, &khugepaged_scan.mm_head)) {
> - slot = list_next_entry(slot, mm_node);
> - khugepaged_scan.mm_slot =
> - mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
> + khugepaged_scan.mm_slot = list_next_entry(slot, mm_node);
> khugepaged_scan.address = 0;
> } else {
> khugepaged_scan.mm_slot = NULL;
> khugepaged_full_scans++;
> }
>
> - collect_mm_slot(mm_slot);
> + collect_mm_slot(slot);
> }
>
> return progress;
> @@ -2618,7 +2599,7 @@ static void khugepaged_wait_work(void)
>
> static int khugepaged(void *none)
> {
> - struct khugepaged_mm_slot *mm_slot;
> + struct mm_slot *slot;
>
> set_freezable();
> set_user_nice(current, MAX_NICE);
> @@ -2629,10 +2610,10 @@ static int khugepaged(void *none)
> }
>
> spin_lock(&khugepaged_mm_lock);
> - mm_slot = khugepaged_scan.mm_slot;
> + slot = khugepaged_scan.mm_slot;
> khugepaged_scan.mm_slot = NULL;
> - if (mm_slot)
> - collect_mm_slot(mm_slot);
> + if (slot)
> + collect_mm_slot(slot);
> spin_unlock(&khugepaged_mm_lock);
> return 0;
> }
next prev parent reply other threads:[~2025-09-27 8:40 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-09-27 0:45 [Patch v4 0/2] mm_slot: fix the usage of mm_slot_entry() Wei Yang
2025-09-27 0:45 ` [Patch v4 1/2] mm/ksm: don't call mm_slot_entry() when the slot is NULL Wei Yang
2025-09-27 8:39 ` Muhammad Usama Anjum
2025-09-28 13:53 ` Dev Jain
2025-09-29 8:13 ` David Hildenbrand
2025-09-29 10:58 ` Kiryl Shutsemau
2025-09-29 15:14 ` Zi Yan
2025-09-27 0:45 ` [Patch v4 2/2] mm/khugepaged: remove definition of struct khugepaged_mm_slot Wei Yang
2025-09-27 8:40 ` Muhammad Usama Anjum [this message]
2025-09-29 8:13 ` David Hildenbrand
2025-09-29 15:16 ` Zi Yan
2025-09-30 6:01 ` Raghavendra K T
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=648726ab-bd57-4ed3-bcaa-0d0372264728@gmail.com \
--to=musamaanjum@gmail.com \
--cc=Liam.Howlett@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=baohua@kernel.org \
--cc=baolin.wang@linux.alibaba.com \
--cc=chengming.zhou@linux.dev \
--cc=david@redhat.com \
--cc=dev.jain@arm.com \
--cc=kirill@shutemov.name \
--cc=lance.yang@linux.dev \
--cc=linux-mm@kvack.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=npache@redhat.com \
--cc=richard.weiyang@gmail.com \
--cc=ryan.roberts@arm.com \
--cc=sj@kernel.org \
--cc=xu.xin16@zte.com.cn \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox