linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] mm/userfaultfd: fix hugetlb fault mutex hash calculation
@ 2026-03-06 14:03 Jianhui Zhou
  2026-03-06 16:53 ` Peter Xu
  2026-03-07  3:27 ` SeongJae Park
  0 siblings, 2 replies; 4+ messages in thread
From: Jianhui Zhou @ 2026-03-06 14:03 UTC (permalink / raw)
  To: Muchun Song, Oscar Salvador, Andrew Morton, Mike Rapoport
  Cc: David Hildenbrand, Peter Xu, Andrea Arcangeli, Mike Kravetz,
	linux-mm, linux-kernel, Jonas Zhou, Jianhui Zhou,
	syzbot+f525fd79634858f478e7, stable

In mfill_atomic_hugetlb(), linear_page_index() is used to calculate the
page index for hugetlb_fault_mutex_hash(). However, linear_page_index()
returns the index in PAGE_SIZE units, while hugetlb_fault_mutex_hash()
expects the index in huge page units (as calculated by
vma_hugecache_offset()). This mismatch means that different addresses
within the same huge page can produce different hash values, leading to
the use of different mutexes for the same huge page. This can cause
races between faulting threads, which can corrupt the reservation map
and trigger the BUG_ON in resv_map_release().

Fix this by replacing linear_page_index() with vma_hugecache_offset()
and applying huge_page_mask() to align the address properly. To make
vma_hugecache_offset() available outside of mm/hugetlb.c, move it to
include/linux/hugetlb.h as a static inline function.

Fixes: 60d4d2d2b40e ("userfaultfd: hugetlbfs: add __mcopy_atomic_hugetlb for huge page UFFDIO_COPY")
Reported-by: syzbot+f525fd79634858f478e7@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=f525fd79634858f478e7
Cc: stable@vger.kernel.org
Signed-off-by: Jianhui Zhou <jianhuizzzzz@gmail.com>
---
 include/linux/hugetlb.h | 17 +++++++++++++++++
 mm/hugetlb.c            | 11 -----------
 mm/userfaultfd.c        |  5 ++++-
 3 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 65910437be1c..3f994f3e839c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -796,6 +796,17 @@ static inline unsigned huge_page_shift(struct hstate *h)
 	return h->order + PAGE_SHIFT;
 }
 
+/*
+ * Convert the address within this vma to the page offset within
+ * the mapping, huge page units here.
+ */
+static inline pgoff_t vma_hugecache_offset(struct hstate *h,
+		struct vm_area_struct *vma, unsigned long address)
+{
+	return ((address - vma->vm_start) >> huge_page_shift(h)) +
+		(vma->vm_pgoff >> huge_page_order(h));
+}
+
 static inline bool order_is_gigantic(unsigned int order)
 {
 	return order > MAX_PAGE_ORDER;
@@ -1197,6 +1208,12 @@ static inline unsigned int huge_page_shift(struct hstate *h)
 	return PAGE_SHIFT;
 }
 
+static inline pgoff_t vma_hugecache_offset(struct hstate *h,
+		struct vm_area_struct *vma, unsigned long address)
+{
+	return linear_page_index(vma, address);
+}
+
 static inline bool hstate_is_gigantic(struct hstate *h)
 {
 	return false;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0beb6e22bc26..b87ed652c748 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1006,17 +1006,6 @@ static long region_count(struct resv_map *resv, long f, long t)
 	return chg;
 }
 
-/*
- * Convert the address within this vma to the page offset within
- * the mapping, huge page units here.
- */
-static pgoff_t vma_hugecache_offset(struct hstate *h,
-			struct vm_area_struct *vma, unsigned long address)
-{
-	return ((address - vma->vm_start) >> huge_page_shift(h)) +
-			(vma->vm_pgoff >> huge_page_order(h));
-}
-
 /**
  * vma_kernel_pagesize - Page size granularity for this VMA.
  * @vma: The user mapping.
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 927086bb4a3c..8efebc47a410 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -507,6 +507,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 	pgoff_t idx;
 	u32 hash;
 	struct address_space *mapping;
+	struct hstate *h;
 
 	/*
 	 * There is no default zero huge page for all huge page sizes as
@@ -564,6 +565,8 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 			goto out_unlock;
 	}
 
+	h = hstate_vma(dst_vma);
+
 	while (src_addr < src_start + len) {
 		VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
 
@@ -573,7 +576,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 		 * in the case of shared pmds.  fault mutex prevents
 		 * races with other faulting threads.
 		 */
-		idx = linear_page_index(dst_vma, dst_addr);
+		idx = vma_hugecache_offset(h, dst_vma, dst_addr & huge_page_mask(h));
 		mapping = dst_vma->vm_file->f_mapping;
 		hash = hugetlb_fault_mutex_hash(mapping, idx);
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
-- 
2.43.0



^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH] mm/userfaultfd: fix hugetlb fault mutex hash calculation
  2026-03-06 14:03 [PATCH] mm/userfaultfd: fix hugetlb fault mutex hash calculation Jianhui Zhou
@ 2026-03-06 16:53 ` Peter Xu
  2026-03-07  3:27 ` SeongJae Park
  1 sibling, 0 replies; 4+ messages in thread
From: Peter Xu @ 2026-03-06 16:53 UTC (permalink / raw)
  To: Jianhui Zhou
  Cc: Muchun Song, Oscar Salvador, Andrew Morton, Mike Rapoport,
	David Hildenbrand, Andrea Arcangeli, Mike Kravetz, linux-mm,
	linux-kernel, Jonas Zhou, syzbot+f525fd79634858f478e7, stable

On Fri, Mar 06, 2026 at 10:03:32PM +0800, Jianhui Zhou wrote:
> In mfill_atomic_hugetlb(), linear_page_index() is used to calculate the
> page index for hugetlb_fault_mutex_hash(). However, linear_page_index()
> returns the index in PAGE_SIZE units, while hugetlb_fault_mutex_hash()
> expects the index in huge page units (as calculated by
> vma_hugecache_offset()). This mismatch means that different addresses
> within the same huge page can produce different hash values, leading to
> the use of different mutexes for the same huge page. This can cause
> races between faulting threads, which can corrupt the reservation map
> and trigger the BUG_ON in resv_map_release().
> 
> Fix this by replacing linear_page_index() with vma_hugecache_offset()
> and applying huge_page_mask() to align the address properly. To make
> vma_hugecache_offset() available outside of mm/hugetlb.c, move it to
> include/linux/hugetlb.h as a static inline function.
> 
> Fixes: 60d4d2d2b40e ("userfaultfd: hugetlbfs: add __mcopy_atomic_hugetlb for huge page UFFDIO_COPY")
> Reported-by: syzbot+f525fd79634858f478e7@syzkaller.appspotmail.com
> Closes: https://syzkaller.appspot.com/bug?extid=f525fd79634858f478e7
> Cc: stable@vger.kernel.org
> Signed-off-by: Jianhui Zhou <jianhuizzzzz@gmail.com>

Good catch.. only one trivial comment below.

> ---
>  include/linux/hugetlb.h | 17 +++++++++++++++++
>  mm/hugetlb.c            | 11 -----------
>  mm/userfaultfd.c        |  5 ++++-
>  3 files changed, 21 insertions(+), 12 deletions(-)
> 
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index 65910437be1c..3f994f3e839c 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -796,6 +796,17 @@ static inline unsigned huge_page_shift(struct hstate *h)
>  	return h->order + PAGE_SHIFT;
>  }
>  
> +/*
> + * Convert the address within this vma to the page offset within
> + * the mapping, huge page units here.
> + */
> +static inline pgoff_t vma_hugecache_offset(struct hstate *h,
> +		struct vm_area_struct *vma, unsigned long address)
> +{
> +	return ((address - vma->vm_start) >> huge_page_shift(h)) +
> +		(vma->vm_pgoff >> huge_page_order(h));
> +}
> +
>  static inline bool order_is_gigantic(unsigned int order)
>  {
>  	return order > MAX_PAGE_ORDER;
> @@ -1197,6 +1208,12 @@ static inline unsigned int huge_page_shift(struct hstate *h)
>  	return PAGE_SHIFT;
>  }
>  
> +static inline pgoff_t vma_hugecache_offset(struct hstate *h,
> +		struct vm_area_struct *vma, unsigned long address)
> +{
> +	return linear_page_index(vma, address);
> +}

IIUC we don't need this; the userfaultfd.c reference should only happen
when CONFIG_HUGETLB_PAGE.  Please double check.

Thanks,

> +
>  static inline bool hstate_is_gigantic(struct hstate *h)
>  {
>  	return false;
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 0beb6e22bc26..b87ed652c748 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1006,17 +1006,6 @@ static long region_count(struct resv_map *resv, long f, long t)
>  	return chg;
>  }
>  
> -/*
> - * Convert the address within this vma to the page offset within
> - * the mapping, huge page units here.
> - */
> -static pgoff_t vma_hugecache_offset(struct hstate *h,
> -			struct vm_area_struct *vma, unsigned long address)
> -{
> -	return ((address - vma->vm_start) >> huge_page_shift(h)) +
> -			(vma->vm_pgoff >> huge_page_order(h));
> -}
> -
>  /**
>   * vma_kernel_pagesize - Page size granularity for this VMA.
>   * @vma: The user mapping.
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index 927086bb4a3c..8efebc47a410 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -507,6 +507,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
>  	pgoff_t idx;
>  	u32 hash;
>  	struct address_space *mapping;
> +	struct hstate *h;
>  
>  	/*
>  	 * There is no default zero huge page for all huge page sizes as
> @@ -564,6 +565,8 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
>  			goto out_unlock;
>  	}
>  
> +	h = hstate_vma(dst_vma);
> +
>  	while (src_addr < src_start + len) {
>  		VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
>  
> @@ -573,7 +576,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
>  		 * in the case of shared pmds.  fault mutex prevents
>  		 * races with other faulting threads.
>  		 */
> -		idx = linear_page_index(dst_vma, dst_addr);
> +		idx = vma_hugecache_offset(h, dst_vma, dst_addr & huge_page_mask(h));
>  		mapping = dst_vma->vm_file->f_mapping;
>  		hash = hugetlb_fault_mutex_hash(mapping, idx);
>  		mutex_lock(&hugetlb_fault_mutex_table[hash]);
> -- 
> 2.43.0
> 

-- 
Peter Xu



^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH] mm/userfaultfd: fix hugetlb fault mutex hash calculation
  2026-03-06 14:03 [PATCH] mm/userfaultfd: fix hugetlb fault mutex hash calculation Jianhui Zhou
  2026-03-06 16:53 ` Peter Xu
@ 2026-03-07  3:27 ` SeongJae Park
  1 sibling, 0 replies; 4+ messages in thread
From: SeongJae Park @ 2026-03-07  3:27 UTC (permalink / raw)
  To: Jianhui Zhou
  Cc: SeongJae Park, Muchun Song, Oscar Salvador, Andrew Morton,
	Mike Rapoport, David Hildenbrand, Peter Xu, Andrea Arcangeli,
	Mike Kravetz, linux-mm, linux-kernel, Jonas Zhou,
	syzbot+f525fd79634858f478e7, stable

Hello Jianhui,

On Fri,  6 Mar 2026 22:03:32 +0800 Jianhui Zhou <jianhuizzzzz@gmail.com> wrote:

> In mfill_atomic_hugetlb(), linear_page_index() is used to calculate the
> page index for hugetlb_fault_mutex_hash(). However, linear_page_index()
> returns the index in PAGE_SIZE units, while hugetlb_fault_mutex_hash()
> expects the index in huge page units (as calculated by
> vma_hugecache_offset()). This mismatch means that different addresses
> within the same huge page can produce different hash values, leading to
> the use of different mutexes for the same huge page. This can cause
> races between faulting threads, which can corrupt the reservation map
> and trigger the BUG_ON in resv_map_release().
> 
> Fix this by replacing linear_page_index() with vma_hugecache_offset()
> and applying huge_page_mask() to align the address properly. To make
> vma_hugecache_offset() available outside of mm/hugetlb.c, move it to
> include/linux/hugetlb.h as a static inline function.
> 
> Fixes: 60d4d2d2b40e ("userfaultfd: hugetlbfs: add __mcopy_atomic_hugetlb for huge page UFFDIO_COPY")
> Reported-by: syzbot+f525fd79634858f478e7@syzkaller.appspotmail.com
> Closes: https://syzkaller.appspot.com/bug?extid=f525fd79634858f478e7
> Cc: stable@vger.kernel.org
> Signed-off-by: Jianhui Zhou <jianhuizzzzz@gmail.com>
> ---
[...]
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
[...]
> +static inline pgoff_t vma_hugecache_offset(struct hstate *h,
> +		struct vm_area_struct *vma, unsigned long address)
> +{
> +	return linear_page_index(vma, address);
> +}
> +

I just found this patch makes UML build fails as below.

    $ make ARCH=um O=.kunit olddefconfig
    Building with:
    $ make all compile_commands.json scripts_gdb ARCH=um O=.kunit --jobs=8
    ERROR:root:In file included from ../io_uring/rsrc.c:9:
    ../include/linux/hugetlb.h: In function ‘vma_hugecache_offset’:
    ../include/linux/hugetlb.h:1214:16: error: implicit declaration of function ‘linear_page_index’ [-Wimplicit-function-declaration]
     1214 |         return linear_page_index(vma, address);
          |                ^~~~~~~~~~~~~~~~~

Maybe we need to include pagemap.h?  I confirmed that the patch attached below fixes the
error on my setup.


Thanks,
SJ

[...]
=== >8 ===
From f55581ba154d6c8aaaf1f1d33cc317b5bf463147 Mon Sep 17 00:00:00 2001
From: SeongJae Park <sj@kernel.org>
Date: Fri, 6 Mar 2026 19:23:28 -0800
Subject: [PATCH] mm/hugetlb: include pagemap.h to fix build error
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Without this, UML build fails as below:

    $ make all compile_commands.json scripts_gdb ARCH=um O=.kunit --jobs=8
    ERROR:root:In file included from ../io_uring/rsrc.c:9:
    ../include/linux/hugetlb.h: In function ‘vma_hugecache_offset’:
    ../include/linux/hugetlb.h:1214:16: error: implicit declaration of function ‘linear_page_index’ [-Wimplicit-function-declaration]
     1214 |         return linear_page_index(vma, address);
          |                ^~~~~~~~~~~~~~~~~

Signed-off-by: SeongJae Park <sj@kernel.org>
---
 include/linux/hugetlb.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3f994f3e839cf..63426bd716839 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -15,6 +15,7 @@
 #include <linux/gfp.h>
 #include <linux/userfaultfd_k.h>
 #include <linux/nodemask.h>
+#include <linux/pagemap.h>
 
 struct mmu_gather;
 struct node;
-- 
2.47.3



^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH] mm/userfaultfd: fix hugetlb fault mutex hash calculation
@ 2026-03-06 13:59 Jianhui Zhou
  0 siblings, 0 replies; 4+ messages in thread
From: Jianhui Zhou @ 2026-03-06 13:59 UTC (permalink / raw)
  To: Muchun Song, Oscar Salvador, Andrew Morton, Mike Rapoport
  Cc: David Hildenbrand, Peter Xu, Andrea Arcangeli, Mike Kravetz,
	linux-mm, linux-kernel, Jonas Zhou, Jianhui Zhou,
	syzbot+f525fd79634858f478e7, stable

From: Jianhui Zhou <jianhuizzzzz@gmail.com>

In mfill_atomic_hugetlb(), linear_page_index() is used to calculate the
page index for hugetlb_fault_mutex_hash(). However, linear_page_index()
returns the index in PAGE_SIZE units, while hugetlb_fault_mutex_hash()
expects the index in huge page units (as calculated by
vma_hugecache_offset()). This mismatch means that different addresses
within the same huge page can produce different hash values, leading to
the use of different mutexes for the same huge page. This can cause
races between faulting threads, which can corrupt the reservation map
and trigger the BUG_ON in resv_map_release().

Fix this by replacing linear_page_index() with vma_hugecache_offset()
and applying huge_page_mask() to align the address properly. To make
vma_hugecache_offset() available outside of mm/hugetlb.c, move it to
include/linux/hugetlb.h as a static inline function.

Fixes: 60d4d2d2b40e ("userfaultfd: hugetlbfs: add __mcopy_atomic_hugetlb for huge page UFFDIO_COPY")
Reported-by: syzbot+f525fd79634858f478e7@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=f525fd79634858f478e7
Cc: stable@vger.kernel.org
Signed-off-by: Jianhui Zhou <jianhuizzzzz@gmail.com>
---
 include/linux/hugetlb.h | 17 +++++++++++++++++
 mm/hugetlb.c            | 11 -----------
 mm/userfaultfd.c        |  5 ++++-
 3 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 65910437be1c..3f994f3e839c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -796,6 +796,17 @@ static inline unsigned huge_page_shift(struct hstate *h)
 	return h->order + PAGE_SHIFT;
 }
 
+/*
+ * Convert the address within this vma to the page offset within
+ * the mapping, huge page units here.
+ */
+static inline pgoff_t vma_hugecache_offset(struct hstate *h,
+		struct vm_area_struct *vma, unsigned long address)
+{
+	return ((address - vma->vm_start) >> huge_page_shift(h)) +
+		(vma->vm_pgoff >> huge_page_order(h));
+}
+
 static inline bool order_is_gigantic(unsigned int order)
 {
 	return order > MAX_PAGE_ORDER;
@@ -1197,6 +1208,12 @@ static inline unsigned int huge_page_shift(struct hstate *h)
 	return PAGE_SHIFT;
 }
 
+static inline pgoff_t vma_hugecache_offset(struct hstate *h,
+		struct vm_area_struct *vma, unsigned long address)
+{
+	return linear_page_index(vma, address);
+}
+
 static inline bool hstate_is_gigantic(struct hstate *h)
 {
 	return false;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0beb6e22bc26..b87ed652c748 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1006,17 +1006,6 @@ static long region_count(struct resv_map *resv, long f, long t)
 	return chg;
 }
 
-/*
- * Convert the address within this vma to the page offset within
- * the mapping, huge page units here.
- */
-static pgoff_t vma_hugecache_offset(struct hstate *h,
-			struct vm_area_struct *vma, unsigned long address)
-{
-	return ((address - vma->vm_start) >> huge_page_shift(h)) +
-			(vma->vm_pgoff >> huge_page_order(h));
-}
-
 /**
  * vma_kernel_pagesize - Page size granularity for this VMA.
  * @vma: The user mapping.
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 927086bb4a3c..8efebc47a410 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -507,6 +507,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 	pgoff_t idx;
 	u32 hash;
 	struct address_space *mapping;
+	struct hstate *h;
 
 	/*
 	 * There is no default zero huge page for all huge page sizes as
@@ -564,6 +565,8 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 			goto out_unlock;
 	}
 
+	h = hstate_vma(dst_vma);
+
 	while (src_addr < src_start + len) {
 		VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
 
@@ -573,7 +576,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 		 * in the case of shared pmds.  fault mutex prevents
 		 * races with other faulting threads.
 		 */
-		idx = linear_page_index(dst_vma, dst_addr);
+		idx = vma_hugecache_offset(h, dst_vma, dst_addr & huge_page_mask(h));
 		mapping = dst_vma->vm_file->f_mapping;
 		hash = hugetlb_fault_mutex_hash(mapping, idx);
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
-- 
2.43.0



^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2026-03-07  3:28 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-03-06 14:03 [PATCH] mm/userfaultfd: fix hugetlb fault mutex hash calculation Jianhui Zhou
2026-03-06 16:53 ` Peter Xu
2026-03-07  3:27 ` SeongJae Park
  -- strict thread matches above, loose matches on Subject: below --
2026-03-06 13:59 Jianhui Zhou

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox