From: Dev Jain <dev.jain@arm.com>
To: Shivank Garg <shivankg@amd.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	David Hildenbrand <david@kernel.org>,
	Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Zi Yan <ziy@nvidia.com>,
	Baolin Wang <baolin.wang@linux.alibaba.com>,
	"Liam R . Howlett" <Liam.Howlett@oracle.com>,
	Nico Pache <npache@redhat.com>,
	Ryan Roberts <ryan.roberts@arm.com>,
	Barry Song <baohua@kernel.org>, Lance Yang <lance.yang@linux.dev>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH V3 4/5] mm/khugepaged: use enum scan_result for result variables and return types
Date: Thu, 22 Jan 2026 14:49:17 +0530
Message-ID: <5639693a-a89a-47c8-978e-1aab59268413@arm.com>
In-Reply-To: <20260118192253.9263-12-shivankg@amd.com>


On 19/01/26 12:52 am, Shivank Garg wrote:
> Convert result variables and return types from int to enum scan_result
> throughout khugepaged code. This improves type safety and code clarity
> by making the intent explicit.
>
> No functional change.
>
> Reviewed-by: Zi Yan <ziy@nvidia.com>
> Signed-off-by: Shivank Garg <shivankg@amd.com>

Reviewed-by: Dev Jain <dev.jain@arm.com>
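
For context, the type being adopted throughout the patch is the scan_result
enum defined near the top of mm/khugepaged.c. A minimal, illustrative sketch
of its shape (abbreviated; the exact member list at this revision may differ,
see mm/khugepaged.c for the full definition):

	enum scan_result {
		SCAN_FAIL,
		SCAN_SUCCEED,
		SCAN_PMD_NULL,
		SCAN_EXCEED_NONE_PTE,
		SCAN_ALLOC_HUGE_PAGE_FAIL,
		SCAN_CGROUP_CHARGE_FAIL,
		/* ... further SCAN_* members elided ... */
	};

Declaring the result variables and return types as enum scan_result rather
than plain int makes it immediately visible which functions traffic in scan
results.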

> ---
>  mm/khugepaged.c | 99 +++++++++++++++++++++++--------------------------
>  1 file changed, 46 insertions(+), 53 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 17f3f0043368..1667abae6d8d 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -537,17 +537,16 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
>  	}
>  }
>  
> -static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
> -					unsigned long start_addr,
> -					pte_t *pte,
> -					struct collapse_control *cc,
> -					struct list_head *compound_pagelist)
> +static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma,
> +		unsigned long start_addr, pte_t *pte, struct collapse_control *cc,
> +		struct list_head *compound_pagelist)
>  {
>  	struct page *page = NULL;
>  	struct folio *folio = NULL;
>  	unsigned long addr = start_addr;
>  	pte_t *_pte;
> -	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
> +	int none_or_zero = 0, shared = 0, referenced = 0;
> +	enum scan_result result = SCAN_FAIL;
>  
>  	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
>  	     _pte++, addr += PAGE_SIZE) {
> @@ -780,13 +779,13 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
>   * @ptl: lock on raw pages' PTEs
>   * @compound_pagelist: list that stores compound pages
>   */
> -static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
> +static enum scan_result __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
>  		pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
>  		unsigned long address, spinlock_t *ptl,
>  		struct list_head *compound_pagelist)
>  {
>  	unsigned int i;
> -	int result = SCAN_SUCCEED;
> +	enum scan_result result = SCAN_SUCCEED;
>  
>  	/*
>  	 * Copying pages' contents is subject to memory poison at any iteration.
> @@ -898,10 +897,8 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
>   * Returns enum scan_result value.
>   */
>  
> -static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
> -				   bool expect_anon,
> -				   struct vm_area_struct **vmap,
> -				   struct collapse_control *cc)
> +static enum scan_result hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
> +		bool expect_anon, struct vm_area_struct **vmap, struct collapse_control *cc)
>  {
>  	struct vm_area_struct *vma;
>  	enum tva_type type = cc->is_khugepaged ? TVA_KHUGEPAGED :
> @@ -930,7 +927,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
>  	return SCAN_SUCCEED;
>  }
>  
> -static inline int check_pmd_state(pmd_t *pmd)
> +static inline enum scan_result check_pmd_state(pmd_t *pmd)
>  {
>  	pmd_t pmde = pmdp_get_lockless(pmd);
>  
> @@ -953,9 +950,8 @@ static inline int check_pmd_state(pmd_t *pmd)
>  	return SCAN_SUCCEED;
>  }
>  
> -static int find_pmd_or_thp_or_none(struct mm_struct *mm,
> -				   unsigned long address,
> -				   pmd_t **pmd)
> +static enum scan_result find_pmd_or_thp_or_none(struct mm_struct *mm,
> +		unsigned long address, pmd_t **pmd)
>  {
>  	*pmd = mm_find_pmd(mm, address);
>  	if (!*pmd)
> @@ -964,12 +960,11 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
>  	return check_pmd_state(*pmd);
>  }
>  
> -static int check_pmd_still_valid(struct mm_struct *mm,
> -				 unsigned long address,
> -				 pmd_t *pmd)
> +static enum scan_result check_pmd_still_valid(struct mm_struct *mm,
> +		unsigned long address, pmd_t *pmd)
>  {
>  	pmd_t *new_pmd;
> -	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
> +	enum scan_result result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
>  
>  	if (result != SCAN_SUCCEED)
>  		return result;
> @@ -985,15 +980,14 @@ static int check_pmd_still_valid(struct mm_struct *mm,
>   * Called and returns without pte mapped or spinlocks held.
>   * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
>   */
> -static int __collapse_huge_page_swapin(struct mm_struct *mm,
> -				       struct vm_area_struct *vma,
> -				       unsigned long start_addr, pmd_t *pmd,
> -				       int referenced)
> +static enum scan_result __collapse_huge_page_swapin(struct mm_struct *mm,
> +		struct vm_area_struct *vma, unsigned long start_addr, pmd_t *pmd,
> +		int referenced)
>  {
>  	int swapped_in = 0;
>  	vm_fault_t ret = 0;
>  	unsigned long addr, end = start_addr + (HPAGE_PMD_NR * PAGE_SIZE);
> -	int result;
> +	enum scan_result result;
>  	pte_t *pte = NULL;
>  	spinlock_t *ptl;
>  
> @@ -1062,8 +1056,8 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
>  	return result;
>  }
>  
> -static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
> -			      struct collapse_control *cc)
> +static enum scan_result alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
> +		struct collapse_control *cc)
>  {
>  	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
>  		     GFP_TRANSHUGE);
> @@ -1090,9 +1084,8 @@ static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
>  	return SCAN_SUCCEED;
>  }
>  
> -static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
> -			      int referenced, int unmapped,
> -			      struct collapse_control *cc)
> +static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long address,
> +		int referenced, int unmapped, struct collapse_control *cc)
>  {
>  	LIST_HEAD(compound_pagelist);
>  	pmd_t *pmd, _pmd;
> @@ -1100,7 +1093,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>  	pgtable_t pgtable;
>  	struct folio *folio;
>  	spinlock_t *pmd_ptl, *pte_ptl;
> -	int result = SCAN_FAIL;
> +	enum scan_result result = SCAN_FAIL;
>  	struct vm_area_struct *vma;
>  	struct mmu_notifier_range range;
>  
> @@ -1246,15 +1239,14 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>  	return result;
>  }
>  
> -static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> -				   struct vm_area_struct *vma,
> -				   unsigned long start_addr, bool *mmap_locked,
> -				   struct collapse_control *cc)
> +static enum scan_result hpage_collapse_scan_pmd(struct mm_struct *mm,
> +		struct vm_area_struct *vma, unsigned long start_addr, bool *mmap_locked,
> +		struct collapse_control *cc)
>  {
>  	pmd_t *pmd;
>  	pte_t *pte, *_pte;
> -	int result = SCAN_FAIL, referenced = 0;
> -	int none_or_zero = 0, shared = 0;
> +	int none_or_zero = 0, shared = 0, referenced = 0;
> +	enum scan_result result = SCAN_FAIL;
>  	struct page *page = NULL;
>  	struct folio *folio = NULL;
>  	unsigned long addr;
> @@ -1441,8 +1433,8 @@ static void collect_mm_slot(struct mm_slot *slot)
>  }
>  
>  /* folio must be locked, and mmap_lock must be held */
> -static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
> -			pmd_t *pmdp, struct folio *folio, struct page *page)
> +static enum scan_result set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
> +		pmd_t *pmdp, struct folio *folio, struct page *page)
>  {
>  	struct mm_struct *mm = vma->vm_mm;
>  	struct vm_fault vmf = {
> @@ -1477,10 +1469,11 @@ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
>  	return SCAN_SUCCEED;
>  }
>  
> -static int try_collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
> +static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
>  		bool install_pmd)
>  {
> -	int nr_mapped_ptes = 0, result = SCAN_FAIL;
> +	enum scan_result result = SCAN_FAIL;
> +	int nr_mapped_ptes = 0;
>  	unsigned int nr_batch_ptes;
>  	struct mmu_notifier_range range;
>  	bool notified = false;
> @@ -1862,9 +1855,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
>   *    + unlock old pages
>   *    + unlock and free huge page;
>   */
> -static int collapse_file(struct mm_struct *mm, unsigned long addr,
> -			 struct file *file, pgoff_t start,
> -			 struct collapse_control *cc)
> +static enum scan_result collapse_file(struct mm_struct *mm, unsigned long addr,
> +		struct file *file, pgoff_t start, struct collapse_control *cc)
>  {
>  	struct address_space *mapping = file->f_mapping;
>  	struct page *dst;
> @@ -1872,7 +1864,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
>  	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
>  	LIST_HEAD(pagelist);
>  	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
> -	int nr_none = 0, result = SCAN_SUCCEED;
> +	enum scan_result result = SCAN_SUCCEED;
> +	int nr_none = 0;
>  	bool is_shmem = shmem_file(file);
>  
>  	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
> @@ -2293,16 +2286,15 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
>  	return result;
>  }
>  
> -static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
> -				    struct file *file, pgoff_t start,
> -				    struct collapse_control *cc)
> +static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
> +		struct file *file, pgoff_t start, struct collapse_control *cc)
>  {
>  	struct folio *folio = NULL;
>  	struct address_space *mapping = file->f_mapping;
>  	XA_STATE(xas, &mapping->i_pages, start);
>  	int present, swap;
>  	int node = NUMA_NO_NODE;
> -	int result = SCAN_SUCCEED;
> +	enum scan_result result = SCAN_SUCCEED;
>  
>  	present = 0;
>  	swap = 0;
> @@ -2400,7 +2392,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
>  	return result;
>  }
>  
> -static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> +static unsigned int khugepaged_scan_mm_slot(unsigned int pages, enum scan_result *result,
>  					    struct collapse_control *cc)
>  	__releases(&khugepaged_mm_lock)
>  	__acquires(&khugepaged_mm_lock)
> @@ -2562,7 +2554,7 @@ static void khugepaged_do_scan(struct collapse_control *cc)
>  	unsigned int progress = 0, pass_through_head = 0;
>  	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
>  	bool wait = true;
> -	int result = SCAN_SUCCEED;
> +	enum scan_result result = SCAN_SUCCEED;
>  
>  	lru_add_drain_all();
>  
> @@ -2775,7 +2767,8 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
>  	struct collapse_control *cc;
>  	struct mm_struct *mm = vma->vm_mm;
>  	unsigned long hstart, hend, addr;
> -	int thps = 0, last_fail = SCAN_FAIL;
> +	enum scan_result last_fail = SCAN_FAIL;
> +	int thps = 0;
>  	bool mmap_locked = true;
>  
>  	BUG_ON(vma->vm_start > start);
> @@ -2796,7 +2789,7 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
>  	hend = end & HPAGE_PMD_MASK;
>  
>  	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
> -		int result = SCAN_FAIL;
> +		enum scan_result result = SCAN_FAIL;
>  		bool triggered_wb = false;
>  
>  retry:



Thread overview: 32+ messages
2026-01-18 19:22 [PATCH V3 0/5] mm/khugepaged: cleanups and scan limit fix Shivank Garg
2026-01-18 19:22 ` [PATCH V3 1/5] mm/khugepaged: remove unnecessary goto 'skip' label Shivank Garg
2026-01-22  7:04   ` Dev Jain
2026-01-22 11:56   ` Nico Pache
2026-01-18 19:22 ` [PATCH V3 2/5] mm/khugepaged: count small VMAs towards scan limit Shivank Garg
2026-01-22  7:32   ` Dev Jain
2026-01-22  8:44     ` Lance Yang
2026-01-22 12:26       ` Garg, Shivank
2026-01-23 10:42         ` Garg, Shivank
2026-01-23 15:37           ` Andrew Morton
2026-01-23 20:07             ` Garg, Shivank
2026-01-18 19:22 ` [PATCH V3 3/5] mm/khugepaged: change collapse_pte_mapped_thp() to return void Shivank Garg
2026-01-22 12:17   ` Nico Pache
2026-01-18 19:22 ` [PATCH V3 4/5] mm/khugepaged: use enum scan_result for result variables and return types Shivank Garg
2026-01-19 10:24   ` David Hildenbrand (Red Hat)
2026-01-22  9:19   ` Dev Jain [this message]
2026-01-22 12:14   ` Nico Pache
2026-01-18 19:23 ` [PATCH V3 5/5] mm/khugepaged: make khugepaged_collapse_control static Shivank Garg
2026-01-22  9:28   ` Dev Jain
2026-01-23  7:48     ` Dev Jain
2026-01-23  9:33       ` Garg, Shivank
2026-01-24  1:21         ` Andrew Morton
2026-01-24  3:02           ` Andrew Morton
2026-01-24  9:02             ` Lorenzo Stoakes
2026-01-24  9:01         ` Lorenzo Stoakes
2026-01-24 10:54           ` Dev Jain
2026-01-24 11:40             ` Lorenzo Stoakes
2026-01-24 11:56               ` Dev Jain
2026-01-24 18:37               ` Garg, Shivank
2026-01-18 20:34 ` [PATCH V3 0/5] mm/khugepaged: cleanups and scan limit fix Andrew Morton
2026-01-19  0:17   ` Zi Yan
2026-01-19  5:50   ` Garg, Shivank