linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] mm: initialize 'seq' in gup_fast to remove -Wmaybe-uninitialized warning
@ 2026-03-02 19:12 Alexey Suchkov
  2026-03-02 19:22 ` Matthew Wilcox
  2026-03-02 19:34 ` [PATCH] [PATCH v2] " Alexey Suchkov
  0 siblings, 2 replies; 9+ messages in thread
From: Alexey Suchkov @ 2026-03-02 19:12 UTC (permalink / raw)
  To: akpm; +Cc: linux-mm, linux-kernel, Alexey Suchkov

The local variable 'seq' in gup_fast (mm/gup.c) was declared
without initialization, which can trigger:

mm/gup.c:3165:20: warning: ‘seq’ may be used uninitialized [-Wmaybe-uninitialized]

Initialize 'seq' to 0. This does not change behavior, since
raw_seqcount_try_begin() always writes to 'seq' before it is read.

Signed-off-by: Alexey Suchkov <dywoq.contact@gmail.com>
---
 mm/gup.c | 291 ++++++++++++++++++++++++++++---------------------------
 1 file changed, 146 insertions(+), 145 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index 8e7dc2c6e..2553e5110 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -55,15 +55,16 @@ static inline void sanity_check_pinned_pages(struct page **pages,
 
 		folio = page_folio(page);
 
-		if (is_zero_page(page) ||
-		    !folio_test_anon(folio))
+		if (is_zero_page(page) || !folio_test_anon(folio))
 			continue;
 		if (!folio_test_large(folio) || folio_test_hugetlb(folio))
-			VM_WARN_ON_ONCE_FOLIO(!PageAnonExclusive(&folio->page), folio);
+			VM_WARN_ON_ONCE_FOLIO(!PageAnonExclusive(&folio->page),
+					      folio);
 		else
 			/* Either a PTE-mapped or a PMD-mapped THP. */
 			VM_WARN_ON_ONCE_PAGE(!PageAnonExclusive(&folio->page) &&
-					     !PageAnonExclusive(page), page);
+						     !PageAnonExclusive(page),
+					     page);
 	}
 }
 
@@ -230,7 +231,9 @@ void folio_add_pin(struct folio *folio)
 }
 
 static inline struct folio *gup_folio_range_next(struct page *start,
-		unsigned long npages, unsigned long i, unsigned int *ntails)
+						 unsigned long npages,
+						 unsigned long i,
+						 unsigned int *ntails)
 {
 	struct page *next = start + i;
 	struct folio *folio = page_folio(next);
@@ -245,7 +248,9 @@ static inline struct folio *gup_folio_range_next(struct page *start,
 }
 
 static inline struct folio *gup_folio_next(struct page **list,
-		unsigned long npages, unsigned long i, unsigned int *ntails)
+					   unsigned long npages,
+					   unsigned long i,
+					   unsigned int *ntails)
 {
 	struct folio *folio = page_folio(list[i]);
 	unsigned int nr;
@@ -566,8 +571,7 @@ static struct folio *try_grab_folio_fast(struct page *page, int refs,
 	if (folio_has_pincount(folio))
 		atomic_add(refs, &folio->_pincount);
 	else
-		folio_ref_add(folio,
-				refs * (GUP_PIN_COUNTING_BIAS - 1));
+		folio_ref_add(folio, refs * (GUP_PIN_COUNTING_BIAS - 1));
 	/*
 	 * Adjust the pincount before re-checking the PTE for changes.
 	 * This is essentially a smp_mb() and is paired with a memory
@@ -579,11 +583,12 @@ static struct folio *try_grab_folio_fast(struct page *page, int refs,
 
 	return folio;
 }
-#endif	/* CONFIG_HAVE_GUP_FAST */
+#endif /* CONFIG_HAVE_GUP_FAST */
 
 /* Common code for can_follow_write_* */
 static inline bool can_follow_write_common(struct page *page,
-		struct vm_area_struct *vma, unsigned int flags)
+					   struct vm_area_struct *vma,
+					   unsigned int flags)
 {
 	/* Maybe FOLL_FORCE is set to override it? */
 	if (!(flags & FOLL_FORCE))
@@ -647,8 +652,8 @@ static inline bool can_follow_write_pud(pud_t pud, struct page *page,
 }
 
 static struct page *follow_huge_pud(struct vm_area_struct *vma,
-				    unsigned long addr, pud_t *pudp,
-				    int flags, unsigned long *page_mask)
+				    unsigned long addr, pud_t *pudp, int flags,
+				    unsigned long *page_mask)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct page *page;
@@ -726,7 +731,8 @@ static struct page *follow_huge_pmd(struct vm_area_struct *vma,
 		return ERR_PTR(-EMLINK);
 
 	VM_WARN_ON_ONCE_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
-			     !PageAnonExclusive(page), page);
+				     !PageAnonExclusive(page),
+			     page);
 
 	ret = try_grab_folio(page_folio(page), 1, flags);
 	if (ret)
@@ -735,7 +741,7 @@ static struct page *follow_huge_pmd(struct vm_area_struct *vma,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
 		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
-#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
 	*page_mask = HPAGE_PMD_NR - 1;
@@ -743,10 +749,10 @@ static struct page *follow_huge_pmd(struct vm_area_struct *vma,
 	return page;
 }
 
-#else  /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
+#else /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
 static struct page *follow_huge_pud(struct vm_area_struct *vma,
-				    unsigned long addr, pud_t *pudp,
-				    int flags, unsigned long *page_mask)
+				    unsigned long addr, pud_t *pudp, int flags,
+				    unsigned long *page_mask)
 {
 	return NULL;
 }
@@ -758,10 +764,10 @@ static struct page *follow_huge_pmd(struct vm_area_struct *vma,
 {
 	return NULL;
 }
-#endif	/* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
+#endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
 
 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
-		pte_t *pte, unsigned int flags)
+			  pte_t *pte, unsigned int flags)
 {
 	if (flags & FOLL_TOUCH) {
 		pte_t orig_entry = ptep_get(pte);
@@ -800,7 +806,8 @@ static inline bool can_follow_write_pte(pte_t pte, struct page *page,
 }
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
-		unsigned long address, pmd_t *pmd, unsigned int flags)
+				    unsigned long address, pmd_t *pmd,
+				    unsigned int flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct folio *folio;
@@ -852,7 +859,8 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	}
 
 	VM_WARN_ON_ONCE_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
-			     !PageAnonExclusive(page), page);
+				     !PageAnonExclusive(page),
+			     page);
 
 	/* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
 	ret = try_grab_folio(folio, 1, flags);
@@ -875,8 +883,8 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		}
 	}
 	if (flags & FOLL_TOUCH) {
-		if ((flags & FOLL_WRITE) &&
-		    !pte_dirty(pte) && !folio_test_dirty(folio))
+		if ((flags & FOLL_WRITE) && !pte_dirty(pte) &&
+		    !folio_test_dirty(folio))
 			folio_mark_dirty(folio);
 		/*
 		 * pte_mkyoung() would be more correct here, but atomic care
@@ -931,8 +939,9 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 		spin_unlock(ptl);
 		split_huge_pmd(vma, pmd, address);
 		/* If pmd was left empty, stuff a page table in there quickly */
-		return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
-			follow_page_pte(vma, address, pmd, flags);
+		return pte_alloc(mm, pmd) ?
+			       ERR_PTR(-ENOMEM) :
+			       follow_page_pte(vma, address, pmd, flags);
 	}
 	page = follow_huge_pmd(vma, address, pmd, flags, page_mask);
 	spin_unlock(ptl);
@@ -1005,8 +1014,8 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
  * by a page descriptor (see also vm_normal_page()).
  */
 static struct page *follow_page_mask(struct vm_area_struct *vma,
-			      unsigned long address, unsigned int flags,
-			      unsigned long *page_mask)
+				     unsigned long address, unsigned int flags,
+				     unsigned long *page_mask)
 {
 	pgd_t *pgd;
 	struct mm_struct *mm = vma->vm_mm;
@@ -1028,8 +1037,8 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
 }
 
 static int get_gate_page(struct mm_struct *mm, unsigned long address,
-		unsigned int gup_flags, struct vm_area_struct **vma,
-		struct page **page)
+			 unsigned int gup_flags, struct vm_area_struct **vma,
+			 struct page **page)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
@@ -1084,9 +1093,8 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
  * FOLL_NOWAIT, the mmap_lock may be released.  If it is, *@locked will be set
  * to 0 and -EBUSY returned.
  */
-static int faultin_page(struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags, bool unshare,
-		int *locked)
+static int faultin_page(struct vm_area_struct *vma, unsigned long address,
+			unsigned int flags, bool unshare, int *locked)
 {
 	unsigned int fault_flags = 0;
 	vm_fault_t ret;
@@ -1220,8 +1228,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 		return -EFAULT;
 
 	if (write) {
-		if (!vma_anon &&
-		    !writable_file_mapping_allowed(vma, gup_flags))
+		if (!vma_anon && !writable_file_mapping_allowed(vma, gup_flags))
 			return -EFAULT;
 
 		if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {
@@ -1263,7 +1270,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
  * historically expanded the stack in the GUP code.
  */
 static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
-	 unsigned long addr)
+					     unsigned long addr)
 {
 #ifdef CONFIG_STACK_GROWSUP
 	return vma_lookup(mm, addr);
@@ -1283,15 +1290,16 @@ static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
 		return NULL;
 
 	/* Let's not warn more than once an hour.. */
-	now = jiffies; next = next_warn;
+	now = jiffies;
+	next = next_warn;
 	if (next && time_before(now, next))
 		return NULL;
-	next_warn = now + 60*60*HZ;
+	next_warn = now + 60 * 60 * HZ;
 
 	/* Let people know things may have changed. */
 	pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
-		current->comm, task_pid_nr(current),
-		vma->vm_start, vma->vm_end, addr);
+		current->comm, task_pid_nr(current), vma->vm_start, vma->vm_end,
+		addr);
 	dump_stack();
 	return NULL;
 #endif
@@ -1351,10 +1359,9 @@ static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
  * instead of __get_user_pages. __get_user_pages should be used only if
  * you need some special @gup_flags.
  */
-static long __get_user_pages(struct mm_struct *mm,
-		unsigned long start, unsigned long nr_pages,
-		unsigned int gup_flags, struct page **pages,
-		int *locked)
+static long __get_user_pages(struct mm_struct *mm, unsigned long start,
+			     unsigned long nr_pages, unsigned int gup_flags,
+			     struct page **pages, int *locked)
 {
 	long ret = 0, i = 0;
 	struct vm_area_struct *vma = NULL;
@@ -1396,8 +1403,8 @@ static long __get_user_pages(struct mm_struct *mm,
 			vma = gup_vma_lookup(mm, start);
 			if (!vma && in_gate_area(mm, start)) {
 				ret = get_gate_page(mm, start & PAGE_MASK,
-						gup_flags, &vma,
-						pages ? &page : NULL);
+						    gup_flags, &vma,
+						    pages ? &page : NULL);
 				if (ret)
 					goto out;
 				page_mask = 0;
@@ -1496,7 +1503,8 @@ static long __get_user_pages(struct mm_struct *mm,
 			for (j = 0; j < page_increm; j++) {
 				subpage = page + j;
 				pages[i + j] = subpage;
-				flush_anon_page(vma, subpage, start + j * PAGE_SIZE);
+				flush_anon_page(vma, subpage,
+						start + j * PAGE_SIZE);
 				flush_dcache_page(subpage);
 			}
 		}
@@ -1512,7 +1520,7 @@ static long __get_user_pages(struct mm_struct *mm,
 static bool vma_permits_fault(struct vm_area_struct *vma,
 			      unsigned int fault_flags)
 {
-	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
+	bool write = !!(fault_flags & FAULT_FLAG_WRITE);
 	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
 	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
 
@@ -1561,9 +1569,8 @@ static bool vma_permits_fault(struct vm_area_struct *vma,
  * This function will not return with an unlocked mmap_lock. So it has not the
  * same semantics wrt the @mm->mmap_lock as does filemap_fault().
  */
-int fixup_user_fault(struct mm_struct *mm,
-		     unsigned long address, unsigned int fault_flags,
-		     bool *unlocked)
+int fixup_user_fault(struct mm_struct *mm, unsigned long address,
+		     unsigned int fault_flags, bool *unlocked)
 {
 	struct vm_area_struct *vma;
 	vm_fault_t ret;
@@ -1646,12 +1653,10 @@ static bool gup_signal_pending(unsigned int flags)
  * Please note that this function, unlike __get_user_pages(), will not return 0
  * for nr_pages > 0, unless FOLL_NOWAIT is used.
  */
-static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
-						unsigned long start,
-						unsigned long nr_pages,
-						struct page **pages,
-						int *locked,
-						unsigned int flags)
+static __always_inline long
+__get_user_pages_locked(struct mm_struct *mm, unsigned long start,
+			unsigned long nr_pages, struct page **pages,
+			int *locked, unsigned int flags)
 {
 	long ret, pages_done;
 	bool must_unlock = false;
@@ -1668,8 +1673,7 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
 			return -EAGAIN;
 		must_unlock = true;
 		*locked = 1;
-	}
-	else
+	} else
 		mmap_assert_locked(mm);
 
 	if (flags & FOLL_PIN)
@@ -1749,8 +1753,8 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
 		}
 
 		*locked = 1;
-		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
-				       pages, locked);
+		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED, pages,
+				       locked);
 		if (!*locked) {
 			/* Continue to retry until we succeeded */
 			VM_WARN_ON_ONCE(ret != 0);
@@ -1810,8 +1814,8 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
  * If @locked is non-NULL, it must held for read only and may be
  * released.  If it's released, *@locked will be set to 0.
  */
-long populate_vma_page_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end, int *locked)
+long populate_vma_page_range(struct vm_area_struct *vma, unsigned long start,
+			     unsigned long end, int *locked)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
@@ -1822,7 +1826,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 	VM_WARN_ON_ONCE(!PAGE_ALIGNED(start));
 	VM_WARN_ON_ONCE(!PAGE_ALIGNED(end));
 	VM_WARN_ON_ONCE_VMA(start < vma->vm_start, vma);
-	VM_WARN_ON_ONCE_VMA(end   > vma->vm_end, vma);
+	VM_WARN_ON_ONCE_VMA(end > vma->vm_end, vma);
 	mmap_assert_locked(mm);
 
 	/*
@@ -1857,8 +1861,8 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 	 * We made sure addr is within a VMA, so the following will
 	 * not result in a stack expansion that recurses back here.
 	 */
-	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
-			       NULL, locked ? locked : &local_locked);
+	ret = __get_user_pages(mm, start, nr_pages, gup_flags, NULL,
+			       locked ? locked : &local_locked);
 	lru_add_drain();
 	return ret;
 }
@@ -1964,7 +1968,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 		if (ret < 0) {
 			if (ignore_errors) {
 				ret = 0;
-				continue;	/* continue at next VMA */
+				continue; /* continue at next VMA */
 			}
 			break;
 		}
@@ -1973,12 +1977,12 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 	}
 	if (locked)
 		mmap_read_unlock(mm);
-	return ret;	/* 0 or negative error code */
+	return ret; /* 0 or negative error code */
 }
 #else /* CONFIG_MMU */
 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
-		unsigned long nr_pages, struct page **pages,
-		int *locked, unsigned int foll_flags)
+				    unsigned long nr_pages, struct page **pages,
+				    int *locked, unsigned int foll_flags)
 {
 	struct vm_area_struct *vma;
 	bool must_unlock = false;
@@ -2002,10 +2006,10 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
 	/* calculate required read or write permissions.
 	 * If FOLL_FORCE is set, we only require the "MAY" flags.
 	 */
-	vm_flags  = (foll_flags & FOLL_WRITE) ?
-			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= (foll_flags & FOLL_FORCE) ?
-			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+	vm_flags = (foll_flags & FOLL_WRITE) ? (VM_WRITE | VM_MAYWRITE) :
+					       (VM_READ | VM_MAYREAD);
+	vm_flags &= (foll_flags & FOLL_FORCE) ? (VM_MAYREAD | VM_MAYWRITE) :
+						(VM_READ | VM_WRITE);
 
 	for (i = 0; i < nr_pages; i++) {
 		vma = find_vma(mm, start);
@@ -2031,7 +2035,7 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
 		*locked = 0;
 	}
 
-	return i ? : -EFAULT;
+	return i ?: -EFAULT;
 }
 #endif /* !CONFIG_MMU */
 
@@ -2055,7 +2059,8 @@ size_t fault_in_writeable(char __user *uaddr, size_t size)
 		return size;
 
 	/* Stop once we overflow to 0. */
-	for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE))
+	for (cur = start; cur && cur < end;
+	     cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE))
 		unsafe_put_user(0, (char __user *)cur, out);
 out:
 	user_write_access_end();
@@ -2125,7 +2130,8 @@ size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
 
 	mmap_read_lock(mm);
 	/* Stop once we overflow to 0. */
-	for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE))
+	for (cur = start; cur && cur < end;
+	     cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE))
 		if (fixup_user_fault(mm, cur, FAULT_FLAG_WRITE, &unlocked))
 			break;
 	mmap_read_unlock(mm);
@@ -2157,7 +2163,8 @@ size_t fault_in_readable(const char __user *uaddr, size_t size)
 		return size;
 
 	/* Stop once we overflow to 0. */
-	for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE))
+	for (cur = start; cur && cur < end;
+	     cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE))
 		unsafe_get_user(c, (const char __user *)cur, out);
 out:
 	user_read_access_end();
@@ -2235,7 +2242,8 @@ static void pofs_unpin(struct pages_or_folios *pofs)
 }
 
 static struct folio *pofs_next_folio(struct folio *folio,
-		struct pages_or_folios *pofs, long *index_ptr)
+				     struct pages_or_folios *pofs,
+				     long *index_ptr)
 {
 	long i = *index_ptr + 1;
 
@@ -2262,9 +2270,9 @@ static struct folio *pofs_next_folio(struct folio *folio,
 /*
  * Returns the number of collected folios. Return value is always >= 0.
  */
-static unsigned long collect_longterm_unpinnable_folios(
-		struct list_head *movable_folio_list,
-		struct pages_or_folios *pofs)
+static unsigned long
+collect_longterm_unpinnable_folios(struct list_head *movable_folio_list,
+				   struct pages_or_folios *pofs)
 {
 	unsigned long collected = 0;
 	struct folio *folio;
@@ -2273,7 +2281,6 @@ static unsigned long collect_longterm_unpinnable_folios(
 
 	for (folio = pofs_get_folio(pofs, i); folio;
 	     folio = pofs_next_folio(folio, pofs, &i)) {
-
 		if (folio_is_longterm_pinnable(folio))
 			continue;
 
@@ -2288,14 +2295,14 @@ static unsigned long collect_longterm_unpinnable_folios(
 		}
 
 		if (drained == 0 && folio_may_be_lru_cached(folio) &&
-				folio_ref_count(folio) !=
-				folio_expected_ref_count(folio) + 1) {
+		    folio_ref_count(folio) !=
+			    folio_expected_ref_count(folio) + 1) {
 			lru_add_drain();
 			drained = 1;
 		}
 		if (drained == 1 && folio_may_be_lru_cached(folio) &&
-				folio_ref_count(folio) !=
-				folio_expected_ref_count(folio) + 1) {
+		    folio_ref_count(folio) !=
+			    folio_expected_ref_count(folio) + 1) {
 			lru_add_drain_all();
 			drained = 2;
 		}
@@ -2388,8 +2395,8 @@ check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
 	LIST_HEAD(movable_folio_list);
 	unsigned long collected;
 
-	collected = collect_longterm_unpinnable_folios(&movable_folio_list,
-						       pofs);
+	collected =
+		collect_longterm_unpinnable_folios(&movable_folio_list, pofs);
 	if (!collected)
 		return 0;
 
@@ -2462,12 +2469,9 @@ static long check_and_migrate_movable_folios(unsigned long nr_folios,
  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
  * allows us to process the FOLL_LONGTERM flag.
  */
-static long __gup_longterm_locked(struct mm_struct *mm,
-				  unsigned long start,
-				  unsigned long nr_pages,
-				  struct page **pages,
-				  int *locked,
-				  unsigned int gup_flags)
+static long __gup_longterm_locked(struct mm_struct *mm, unsigned long start,
+				  unsigned long nr_pages, struct page **pages,
+				  int *locked, unsigned int gup_flags)
 {
 	unsigned int flags;
 	long rc, nr_pinned_pages;
@@ -2478,9 +2482,8 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 
 	flags = memalloc_pin_save();
 	do {
-		nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
-							  pages, locked,
-							  gup_flags);
+		nr_pinned_pages = __get_user_pages_locked(
+			mm, start, nr_pages, pages, locked, gup_flags);
 		if (nr_pinned_pages <= 0) {
 			rc = nr_pinned_pages;
 			break;
@@ -2527,7 +2530,8 @@ static bool is_valid_gup_args(struct page **pages, int *locked,
 		return false;
 
 	/* LONGTERM can only be specified when pinning */
-	if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM)))
+	if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) &&
+			 (gup_flags & FOLL_LONGTERM)))
 		return false;
 
 	/* Pages input must be given if using GET/PIN */
@@ -2600,10 +2604,9 @@ static bool is_valid_gup_args(struct page **pages, int *locked,
  * should use get_user_pages_remote because it cannot pass
  * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
  */
-long get_user_pages_remote(struct mm_struct *mm,
-		unsigned long start, unsigned long nr_pages,
-		unsigned int gup_flags, struct page **pages,
-		int *locked)
+long get_user_pages_remote(struct mm_struct *mm, unsigned long start,
+			   unsigned long nr_pages, unsigned int gup_flags,
+			   struct page **pages, int *locked)
 {
 	int local_locked = 1;
 
@@ -2618,10 +2621,9 @@ long get_user_pages_remote(struct mm_struct *mm,
 EXPORT_SYMBOL(get_user_pages_remote);
 
 #else /* CONFIG_MMU */
-long get_user_pages_remote(struct mm_struct *mm,
-			   unsigned long start, unsigned long nr_pages,
-			   unsigned int gup_flags, struct page **pages,
-			   int *locked)
+long get_user_pages_remote(struct mm_struct *mm, unsigned long start,
+			   unsigned long nr_pages, unsigned int gup_flags,
+			   struct page **pages, int *locked)
 {
 	return 0;
 }
@@ -2827,8 +2829,8 @@ static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags)
  * pmdp_collapse_flush() in the THP collapse code path).
  */
 static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
-		unsigned long end, unsigned int flags, struct page **pages,
-		int *nr)
+			      unsigned long end, unsigned int flags,
+			      struct page **pages, int *nr)
 {
 	int ret = 0;
 	pte_t *ptep, *ptem;
@@ -2914,16 +2916,16 @@ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
  * useful to have gup_fast_pmd_leaf even if we can't operate on ptes.
  */
 static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
-		unsigned long end, unsigned int flags, struct page **pages,
-		int *nr)
+			      unsigned long end, unsigned int flags,
+			      struct page **pages, int *nr)
 {
 	return 0;
 }
 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
 
 static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
-		unsigned long end, unsigned int flags, struct page **pages,
-		int *nr)
+			     unsigned long end, unsigned int flags,
+			     struct page **pages, int *nr)
 {
 	struct page *page;
 	struct folio *folio;
@@ -2965,8 +2967,8 @@ static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 }
 
 static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
-		unsigned long end, unsigned int flags, struct page **pages,
-		int *nr)
+			     unsigned long end, unsigned int flags,
+			     struct page **pages, int *nr)
 {
 	struct page *page;
 	struct folio *folio;
@@ -3009,8 +3011,8 @@ static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
 }
 
 static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
-		unsigned long end, unsigned int flags, struct page **pages,
-		int *nr)
+			      unsigned long end, unsigned int flags,
+			      struct page **pages, int *nr)
 {
 	unsigned long next;
 	pmd_t *pmdp;
@@ -3029,7 +3031,7 @@ static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
 				return 0;
 
 			if (!gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags,
-				pages, nr))
+					       pages, nr))
 				return 0;
 
 		} else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags,
@@ -3041,8 +3043,8 @@ static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
 }
 
 static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
-		unsigned long end, unsigned int flags, struct page **pages,
-		int *nr)
+			      unsigned long end, unsigned int flags,
+			      struct page **pages, int *nr)
 {
 	unsigned long next;
 	pud_t *pudp;
@@ -3067,8 +3069,8 @@ static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
 }
 
 static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
-		unsigned long end, unsigned int flags, struct page **pages,
-		int *nr)
+			      unsigned long end, unsigned int flags,
+			      struct page **pages, int *nr)
 {
 	unsigned long next;
 	p4d_t *p4dp;
@@ -3081,8 +3083,8 @@ static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
 		if (!p4d_present(p4d))
 			return 0;
 		BUILD_BUG_ON(p4d_leaf(p4d));
-		if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags,
-					pages, nr))
+		if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags, pages,
+					nr))
 			return 0;
 	} while (p4dp++, addr = next, addr != end);
 
@@ -3090,7 +3092,7 @@ static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
 }
 
 static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
-		unsigned int flags, struct page **pages, int *nr)
+			       unsigned int flags, struct page **pages, int *nr)
 {
 	unsigned long next;
 	pgd_t *pgdp;
@@ -3103,14 +3105,15 @@ static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
 		if (pgd_none(pgd))
 			return;
 		BUILD_BUG_ON(pgd_leaf(pgd));
-		if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags,
-					pages, nr))
+		if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags, pages,
+					nr))
 			return;
 	} while (pgdp++, addr = next, addr != end);
 }
 #else
 static inline void gup_fast_pgd_range(unsigned long addr, unsigned long end,
-		unsigned int flags, struct page **pages, int *nr)
+				      unsigned int flags, struct page **pages,
+				      int *nr)
 {
 }
 #endif /* CONFIG_HAVE_GUP_FAST */
@@ -3127,18 +3130,19 @@ static bool gup_fast_permitted(unsigned long start, unsigned long end)
 #endif
 
 static unsigned long gup_fast(unsigned long start, unsigned long end,
-		unsigned int gup_flags, struct page **pages)
+			      unsigned int gup_flags, struct page **pages)
 {
 	unsigned long flags;
 	int nr_pinned = 0;
-	unsigned seq;
+	unsigned int seq = 0;
 
 	if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) ||
 	    !gup_fast_permitted(start, end))
 		return 0;
 
 	if (gup_flags & FOLL_PIN) {
-		if (!raw_seqcount_try_begin(&current->mm->write_protect_seq, seq))
+		if (!raw_seqcount_try_begin(&current->mm->write_protect_seq,
+					    seq))
 			return 0;
 	}
 
@@ -3173,17 +3177,17 @@ static unsigned long gup_fast(unsigned long start, unsigned long end,
 }
 
 static int gup_fast_fallback(unsigned long start, unsigned long nr_pages,
-		unsigned int gup_flags, struct page **pages)
+			     unsigned int gup_flags, struct page **pages)
 {
 	unsigned long len, end;
 	unsigned long nr_pinned;
 	int locked = 0;
 	int ret;
 
-	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
-				       FOLL_FORCE | FOLL_PIN | FOLL_GET |
-				       FOLL_FAST_ONLY | FOLL_NOFAULT |
-				       FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT)))
+	if (WARN_ON_ONCE(gup_flags &
+			 ~(FOLL_WRITE | FOLL_LONGTERM | FOLL_FORCE | FOLL_PIN |
+			   FOLL_GET | FOLL_FAST_ONLY | FOLL_NOFAULT |
+			   FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT)))
 		return -EINVAL;
 
 	if (gup_flags & FOLL_PIN)
@@ -3339,10 +3343,9 @@ EXPORT_SYMBOL_GPL(pin_user_pages_fast);
  * Note that if a zero_page is amongst the returned pages, it will not have
  * pins in it and unpin_user_page*() will not remove pins from it.
  */
-long pin_user_pages_remote(struct mm_struct *mm,
-			   unsigned long start, unsigned long nr_pages,
-			   unsigned int gup_flags, struct page **pages,
-			   int *locked)
+long pin_user_pages_remote(struct mm_struct *mm, unsigned long start,
+			   unsigned long nr_pages, unsigned int gup_flags,
+			   struct page **pages, int *locked)
 {
 	int local_locked = 1;
 
@@ -3380,8 +3383,8 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages,
 
 	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
 		return 0;
-	return __gup_longterm_locked(current->mm, start, nr_pages,
-				     pages, &locked, gup_flags);
+	return __gup_longterm_locked(current->mm, start, nr_pages, pages,
+				     &locked, gup_flags);
 }
 EXPORT_SYMBOL(pin_user_pages);
 
@@ -3480,10 +3483,8 @@ long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
 			 * reason, we try to allocate them and add them to the
 			 * page cache.
 			 */
-			nr_found = filemap_get_folios_contig(memfd->f_mapping,
-							     &start_idx,
-							     end_idx,
-							     &fbatch);
+			nr_found = filemap_get_folios_contig(
+				memfd->f_mapping, &start_idx, end_idx, &fbatch);
 			if (folio) {
 				folio_put(folio);
 				folio = NULL;
-- 
2.53.0



^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] mm: initialize 'seq' in gup_fast to remove -Wmaybe-uninitialized warning
  2026-03-02 19:12 [PATCH] mm: initialize 'seq' in gup_fast to remove -Wmaybe-uninitialized warning Alexey Suchkov
@ 2026-03-02 19:22 ` Matthew Wilcox
  2026-03-02 19:34 ` [PATCH] [PATCH v2] " Alexey Suchkov
  1 sibling, 0 replies; 9+ messages in thread
From: Matthew Wilcox @ 2026-03-02 19:22 UTC (permalink / raw)
  To: Alexey Suchkov; +Cc: akpm, linux-mm, linux-kernel, Alexey Suchkov

On Mon, Mar 02, 2026 at 10:12:37PM +0300, Alexey Suchkov wrote:
> The local variable 'seq' in gup_fast (mm/gup.c) was declared
> without initialization, which can trigger:

I was going to look at your patch to see why it's wrong, but it contains
so much gratuitous reformatting that I gave up.  NAK.


^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH] [PATCH v2] mm: initialize 'seq' in gup_fast to remove -Wmaybe-uninitialized warning
  2026-03-02 19:12 [PATCH] mm: initialize 'seq' in gup_fast to remove -Wmaybe-uninitialized warning Alexey Suchkov
  2026-03-02 19:22 ` Matthew Wilcox
@ 2026-03-02 19:34 ` Alexey Suchkov
  2026-03-02 19:44   ` Matthew Wilcox
                     ` (2 more replies)
  1 sibling, 3 replies; 9+ messages in thread
From: Alexey Suchkov @ 2026-03-02 19:34 UTC (permalink / raw)
  To: aleks.koyf; +Cc: akpm, dywoq.contact, linux-kernel, linux-mm

The local variable 'seq' in gup_fast (mm/gup.c) was declared
without initialization, which can trigger:

mm/gup.c:3165:20: warning: ‘seq’ may be used uninitialized [-Wmaybe-uninitialized]

Initialize 'seq' to 0. This does not change behavior, since
raw_seqcount_try_begin() always writes to 'seq' before it is read.

Changes since v1:
- Dropped the unintended whitespace/reformatting changes from v1.

Signed-off-by: Alexey Suchkov <dywoq.contact@gmail.com>
---
 mm/gup.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/gup.c b/mm/gup.c
index 8e7dc2c6e..879798de5 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -3131,7 +3131,7 @@ static unsigned long gup_fast(unsigned long start, unsigned long end,
 {
 	unsigned long flags;
 	int nr_pinned = 0;
-	unsigned seq;
+	unsigned int seq = 0;
 
 	if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) ||
 	    !gup_fast_permitted(start, end))
-- 
2.53.0



^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] [PATCH v2] mm: initialize 'seq' in gup_fast to remove -Wmaybe-uninitialized warning
  2026-03-02 19:34 ` [PATCH] [PATCH v2] " Alexey Suchkov
@ 2026-03-02 19:44   ` Matthew Wilcox
  2026-03-02 19:57     ` Alexey Suchkov
  2026-03-02 19:48   ` Joshua Hahn
  2026-03-02 20:43   ` Andrew Morton
  2 siblings, 1 reply; 9+ messages in thread
From: Matthew Wilcox @ 2026-03-02 19:44 UTC (permalink / raw)
  To: Alexey Suchkov; +Cc: akpm, dywoq.contact, linux-kernel, linux-mm

On Mon, Mar 02, 2026 at 10:34:05PM +0300, Alexey Suchkov wrote:
> The local variable 'seq' in gup_fast (mm/gup.c) was declared
> without initialization, which can trigger:
> 
> mm/gup.c:3165:20: warning: ‘seq’ may be used uninitialized [-Wmaybe-uninitialized]
> 
> Initialize 'seq' to 0. This does not change behavior, since
> raw_seqcount_try_begin() always writes to it before use.

What compiler?


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] [PATCH v2] mm: initialize 'seq' in gup_fast to remove -Wmaybe-uninitialized warning
  2026-03-02 19:34 ` [PATCH] [PATCH v2] " Alexey Suchkov
  2026-03-02 19:44   ` Matthew Wilcox
@ 2026-03-02 19:48   ` Joshua Hahn
  2026-03-02 19:53     ` Matthew Wilcox
  2026-03-02 20:09     ` Alexey Suchkov
  2026-03-02 20:43   ` Andrew Morton
  2 siblings, 2 replies; 9+ messages in thread
From: Joshua Hahn @ 2026-03-02 19:48 UTC (permalink / raw)
  To: Alexey Suchkov; +Cc: akpm, dywoq.contact, linux-kernel, linux-mm

On Mon,  2 Mar 2026 22:34:05 +0300 Alexey Suchkov <aleks.koyf@gmail.com> wrote:

> The local variable 'seq' in gup_fast (mm/gup.c) was declared
> without initialization, which can trigger:
> 
> mm/gup.c:3165:20: warning: ‘seq’ may be used uninitialized [-Wmaybe-uninitialized]
> 
> Initialize 'seq' to 0. This does not change behavior, since
> raw_seqcount_try_begin() always writes to it before use.

Hi Alexey, I hope you're doing well!

Unfortunately I think that this patch is still unneeded. The variable seq is
used in two places, raw_seqcount_try_begin and read_seqcount_retry, and nowhere
else in the function. Both are guarded by if (gup_flags & FOLL_PIN).

raw_seqcount_try_begin is a macro that sets the value of seq.
read_seqcount_retry is a macro that indeed reads the value of seq, but at
that point seq has already been initialized by raw_seqcount_try_begin.

So I am having a hard time seeing where it would ever be used in an
uninitialized context. The compiler should be smart enough to expand the
macro, see that the writer and consumer are under the same if conditions,
and realize that it is always initialized before being used.

I'm curious to know if the compilation warning above is theoretical, or
if you've actually hit it during compilation.

Let me know what you think. Have a great day!
Joshua

> Changes since v1:
> - Removes the accidental formatting from v1.
> 
> Signed-off-by: Alexey Suchkov <dywoq.contact@gmail.com>
> ---
>  mm/gup.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/mm/gup.c b/mm/gup.c
> index 8e7dc2c6e..879798de5 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -3131,7 +3131,7 @@ static unsigned long gup_fast(unsigned long start, unsigned long end,
>  {
>  	unsigned long flags;
>  	int nr_pinned = 0;
> -	unsigned seq;
> +	unsigned int seq = 0;
>  
>  	if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) ||
>  	    !gup_fast_permitted(start, end))
> -- 
> 2.53.0


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] [PATCH v2] mm: initialize 'seq' in gup_fast to remove -Wmaybe-uninitialized warning
  2026-03-02 19:48   ` Joshua Hahn
@ 2026-03-02 19:53     ` Matthew Wilcox
  2026-03-02 20:09     ` Alexey Suchkov
  1 sibling, 0 replies; 9+ messages in thread
From: Matthew Wilcox @ 2026-03-02 19:53 UTC (permalink / raw)
  To: Joshua Hahn; +Cc: Alexey Suchkov, akpm, dywoq.contact, linux-kernel, linux-mm

On Mon, Mar 02, 2026 at 11:48:35AM -0800, Joshua Hahn wrote:
> Unfortunately I think that this patch is still unneeded. The variable seq is
> used in two places, raw_seqcount_try_begin and read_seqcount_retry, and nowhere
> else in the function. Both are guarded by if (gup_flags & FOLL_PIN).

Yeah, historically GCC wasn't great with that pattern.  It failed to
notice that the condition was the same and so there was no path through
the code that would use an uninit variable.  Hence my question about
what compiler version he's using to see whether we can disregard this
because it's an old compiler that we don't support any more.


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] [PATCH v2] mm: initialize 'seq' in gup_fast to remove -Wmaybe-uninitialized warning
  2026-03-02 19:44   ` Matthew Wilcox
@ 2026-03-02 19:57     ` Alexey Suchkov
  0 siblings, 0 replies; 9+ messages in thread
From: Alexey Suchkov @ 2026-03-02 19:57 UTC (permalink / raw)
  To: willy; +Cc: akpm, dywoq.contact, linux-kernel, linux-mm

> What compiler?
My compiler is GCC 15.2.1


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] [PATCH v2] mm: initialize 'seq' in gup_fast to remove -Wmaybe-uninitialized warning
  2026-03-02 19:48   ` Joshua Hahn
  2026-03-02 19:53     ` Matthew Wilcox
@ 2026-03-02 20:09     ` Alexey Suchkov
  1 sibling, 0 replies; 9+ messages in thread
From: Alexey Suchkov @ 2026-03-02 20:09 UTC (permalink / raw)
  To: joshua.hahnjy; +Cc: akpm, aleks.koyf, dywoq.contact, linux-kernel, linux-mm

> Hi Alexey, I hope you're doing well!
> 
> Unfortunately I think that this patch is still unneeded. The variable seq is
> used in two places, raw_seqcount_try_begin and read_seqcount_retry, and nowhere
> else in the function. Both are guarded by if (gup_flags & FOLL_PIN).
>
> raw_seqcount_try_begin is a macro that sets the value of seq.
> read_seqcount_retry is a macro that indeed reads the value of seq, but at
> that point seq has already been initialized by raw_seqcount_try_begin.

> So I am having a hard time seeing where it would ever be used in an
> uninitialized context. The compiler should be smart enough to expand the
> macro, see that the writer and consumer are under the same if conditions,
> and realize that it is always initialized before being used.
>
> I'm curious to know if the compilation warning above is theoretical, or
> if you've actually it it during compilation.
>
> Let me know what you think. Have a great day!
> Joshua

Hi Joshua,

Thanks for your review. I did see this warning in practice with GCC 15.2.1 and "make -j$(nproc) W=2":

In function ‘gup_fast’, 
	inlined from ‘gup_fast_fallback’ at mm/gup.c:3202:14: 
mm/gup.c:3165:20: warning: ‘seq’ may be used uninitialized [-Wmaybe-uninitialized] 
3165 | 		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) { 
     | 									  ^ 
mm/gup.c: In function ‘gup_fast_fallback’: 
mm/gup.c:3134:18: note: ‘seq’ was declared here 
3134 | unsigned seq; 
     |		^~~


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] [PATCH v2] mm: initialize 'seq' in gup_fast to remove -Wmaybe-uninitialized warning
  2026-03-02 19:34 ` [PATCH] [PATCH v2] " Alexey Suchkov
  2026-03-02 19:44   ` Matthew Wilcox
  2026-03-02 19:48   ` Joshua Hahn
@ 2026-03-02 20:43   ` Andrew Morton
  2 siblings, 0 replies; 9+ messages in thread
From: Andrew Morton @ 2026-03-02 20:43 UTC (permalink / raw)
  To: Alexey Suchkov
  Cc: dywoq.contact, linux-kernel, linux-mm, Matthew Wilcox, Joshua Hahn

On Mon,  2 Mar 2026 22:34:05 +0300 Alexey Suchkov <aleks.koyf@gmail.com> wrote:

> The local variable 'seq' in gup_fast (mm/gup.c) was declared
> without initialization, which can trigger:
> 
> mm/gup.c:3165:20: warning: ‘seq’ may be used uninitialized [-Wmaybe-uninitialized]
> 
> Initialize 'seq' to 0. This does not change behavior, since
> raw_seqcount_try_begin() always writes to it before use.
> 
> ...
>
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -3131,7 +3131,7 @@ static unsigned long gup_fast(unsigned long start, unsigned long end,
>  {
>  	unsigned long flags;
>  	int nr_pinned = 0;
> -	unsigned seq;
> +	unsigned int seq = 0;
>  
>  	if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) ||
>  	    !gup_fast_permitted(start, end))

stupid gcc.  I liked uninitialized_var(), particularly for its
self-documenting nature.  Never agreed with Linus's hate on it.

Thanks, I tweaked the changelog a bit:

: The local variable 'seq' in gup_fast (mm/gup.c) was declared
: without initialization, which with gcc-15.2.1 can trigger:
: 
: mm/gup.c:3165:20: warning: `seq' may be used uninitialized [-Wmaybe-uninitialized]
: 
: Work around this by initializing 'seq' to 0.  This does not change
: behavior, since raw_seqcount_try_begin() always writes to it before use.


^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2026-03-02 20:43 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-03-02 19:12 [PATCH] mm: initialize 'seq' in gup_fast to remove -Wmaybe-uninitialized warning Alexey Suchkov
2026-03-02 19:22 ` Matthew Wilcox
2026-03-02 19:34 ` [PATCH] [PATCH v2] " Alexey Suchkov
2026-03-02 19:44   ` Matthew Wilcox
2026-03-02 19:57     ` Alexey Suchkov
2026-03-02 19:48   ` Joshua Hahn
2026-03-02 19:53     ` Matthew Wilcox
2026-03-02 20:09     ` Alexey Suchkov
2026-03-02 20:43   ` Andrew Morton

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox