linux-mm.kvack.org archive mirror
* [PATCH v3] mm/vmalloc: fix data race in show_numa_info()
@ 2025-05-07 14:25 Jeongjun Park
  2025-05-07 22:33 ` Andrew Morton
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Jeongjun Park @ 2025-05-07 14:25 UTC (permalink / raw)
  To: akpm, urezki; +Cc: edumazet, linux-mm, linux-kernel, Jeongjun Park

The following data-race was found in show_numa_info():

==================================================================
BUG: KCSAN: data-race in vmalloc_info_show / vmalloc_info_show

read to 0xffff88800971fe30 of 4 bytes by task 8289 on cpu 0:
 show_numa_info mm/vmalloc.c:4936 [inline]
 vmalloc_info_show+0x5a8/0x7e0 mm/vmalloc.c:5016
 seq_read_iter+0x373/0xb40 fs/seq_file.c:230
 proc_reg_read_iter+0x11e/0x170 fs/proc/inode.c:299
 new_sync_read fs/read_write.c:489 [inline]
 vfs_read+0x5b4/0x740 fs/read_write.c:570
 ksys_read+0xbe/0x190 fs/read_write.c:713
 __do_sys_read fs/read_write.c:722 [inline]
 __se_sys_read fs/read_write.c:720 [inline]
 __x64_sys_read+0x41/0x50 fs/read_write.c:720
 x64_sys_call+0x1729/0x1fd0 arch/x86/include/generated/asm/syscalls_64.h:1
 do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
 do_syscall_64+0xa6/0x1b0 arch/x86/entry/syscall_64.c:94
 entry_SYSCALL_64_after_hwframe+0x77/0x7f

write to 0xffff88800971fe30 of 4 bytes by task 8287 on cpu 1:
 show_numa_info mm/vmalloc.c:4934 [inline]
 vmalloc_info_show+0x38f/0x7e0 mm/vmalloc.c:5016
 seq_read_iter+0x373/0xb40 fs/seq_file.c:230
 proc_reg_read_iter+0x11e/0x170 fs/proc/inode.c:299
 new_sync_read fs/read_write.c:489 [inline]
 vfs_read+0x5b4/0x740 fs/read_write.c:570
 ksys_read+0xbe/0x190 fs/read_write.c:713
 __do_sys_read fs/read_write.c:722 [inline]
 __se_sys_read fs/read_write.c:720 [inline]
 __x64_sys_read+0x41/0x50 fs/read_write.c:720
 x64_sys_call+0x1729/0x1fd0 arch/x86/include/generated/asm/syscalls_64.h:1
 do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
 do_syscall_64+0xa6/0x1b0 arch/x86/entry/syscall_64.c:94
 entry_SYSCALL_64_after_hwframe+0x77/0x7f

value changed: 0x0000008f -> 0x00000000
==================================================================

According to this report, there is a read/write data race because m->private
is accessible from multiple CPUs. To fix this, instead of allocating the
counters buffer in proc_vmalloc_init() and passing its address to m->private,
show_numa_info() should allocate the buffer itself.

One thing to note is that show_numa_info() is called inside a spinlock
critical section, so the buffer must be allocated with the GFP_ATOMIC flag.

Fixes: a47a126ad5ea ("vmallocinfo: add NUMA information")
Suggested-by: Eric Dumazet <edumazet@google.com>
Suggested-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Jeongjun Park <aha310510@gmail.com>
---
v3: Following Uladzislau Rezki's suggestion, we check v->flags beforehand
	to avoid printing uninitialized members of vm_struct.
- Link to v2: https://lore.kernel.org/all/20250506082520.84153-1-aha310510@gmail.com/
v2: Refactor some functions and fix the patch as per Eric Dumazet's suggestion
- Link to v1: https://lore.kernel.org/all/20250505171948.24410-1-aha310510@gmail.com/
---
 mm/vmalloc.c | 51 ++++++++++++++++++++++++++-------------------------
 1 file changed, 26 insertions(+), 25 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3ed720a787ec..9139025e20e5 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4914,28 +4914,32 @@ bool vmalloc_dump_obj(void *object)
 #endif
 
 #ifdef CONFIG_PROC_FS
+
+/*
+ * Print number of pages allocated on each memory node.
+ *
+ * This function can only be called if CONFIG_NUMA is enabled
+ * and the VM_UNINITIALIZED bit in v->flags is not set.
+ */
 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
 {
-	if (IS_ENABLED(CONFIG_NUMA)) {
-		unsigned int nr, *counters = m->private;
-		unsigned int step = 1U << vm_area_page_order(v);
+	unsigned int nr, *counters;
+	unsigned int step = 1U << vm_area_page_order(v);
 
-		if (!counters)
-			return;
+	counters = kcalloc(nr_node_ids, sizeof(unsigned int), GFP_ATOMIC);
+	if (!counters)
+		return;
 
-		if (v->flags & VM_UNINITIALIZED)
-			return;
-		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
-		smp_rmb();
+	/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
+	smp_rmb();
 
-		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
+	for (nr = 0; nr < v->nr_pages; nr += step)
+		counters[page_to_nid(v->pages[nr])] += step;
+	for_each_node_state(nr, N_HIGH_MEMORY)
+		if (counters[nr])
+			seq_printf(m, " N%u=%u", nr, counters[nr]);
 
-		for (nr = 0; nr < v->nr_pages; nr += step)
-			counters[page_to_nid(v->pages[nr])] += step;
-		for_each_node_state(nr, N_HIGH_MEMORY)
-			if (counters[nr])
-				seq_printf(m, " N%u=%u", nr, counters[nr]);
-	}
+	kfree(counters);
 }
 
 static void show_purge_info(struct seq_file *m)
@@ -4979,6 +4983,8 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
 			}
 
 			v = va->vm;
+			if (v->flags & VM_UNINITIALIZED)
+				continue;
 
 			seq_printf(m, "0x%pK-0x%pK %7ld",
 				v->addr, v->addr + v->size, v->size);
@@ -5013,7 +5019,9 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
 			if (is_vmalloc_addr(v->pages))
 				seq_puts(m, " vpages");
 
-			show_numa_info(m, v);
+			if (IS_ENABLED(CONFIG_NUMA))
+				show_numa_info(m, v);
+
 			seq_putc(m, '\n');
 		}
 		spin_unlock(&vn->busy.lock);
@@ -5028,14 +5036,7 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
 
 static int __init proc_vmalloc_init(void)
 {
-	void *priv_data = NULL;
-
-	if (IS_ENABLED(CONFIG_NUMA))
-		priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
-
-	proc_create_single_data("vmallocinfo",
-		0400, NULL, vmalloc_info_show, priv_data);
-
+	proc_create_single("vmallocinfo", 0400, NULL, vmalloc_info_show);
 	return 0;
 }
 module_init(proc_vmalloc_init);
--


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] mm/vmalloc: fix data race in show_numa_info()
  2025-05-07 14:25 [PATCH v3] mm/vmalloc: fix data race in show_numa_info() Jeongjun Park
@ 2025-05-07 22:33 ` Andrew Morton
  2025-05-08  4:47   ` Jeongjun Park
  2025-05-07 22:50 ` Ozgur Kara
       [not found] ` <01100196acf1ede5-ae116361-04f2-4e8f-b7a4-7079d6158ffb-000000@eu-north-1.amazonses.com>
  2 siblings, 1 reply; 7+ messages in thread
From: Andrew Morton @ 2025-05-07 22:33 UTC (permalink / raw)
  To: Jeongjun Park; +Cc: urezki, edumazet, linux-mm, linux-kernel

On Wed,  7 May 2025 23:25:52 +0900 Jeongjun Park <aha310510@gmail.com> wrote:

> The following data-race was found in show_numa_info():
> 
> ...
>
> 
> According to this report, there is a read/write data race because m->private
> is accessible from multiple CPUs. To fix this, instead of allocating the
> counters buffer in proc_vmalloc_init() and passing its address to m->private,
> show_numa_info() should allocate the buffer itself.
> 
> One thing to note is that show_numa_info() is called inside a spinlock
> critical section, so the buffer must be allocated with the GFP_ATOMIC flag.

GFP_ATOMIC is unfortunate.  Can vmalloc_info_show() allocate the
storage outside the lock and pass that pointer into show_numa_info()? 
That way will be more efficient also, less allocating and freeing.




^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] mm/vmalloc: fix data race in show_numa_info()
  2025-05-07 14:25 [PATCH v3] mm/vmalloc: fix data race in show_numa_info() Jeongjun Park
  2025-05-07 22:33 ` Andrew Morton
@ 2025-05-07 22:50 ` Ozgur Kara
       [not found] ` <01100196acf1ede5-ae116361-04f2-4e8f-b7a4-7079d6158ffb-000000@eu-north-1.amazonses.com>
  2 siblings, 0 replies; 7+ messages in thread
From: Ozgur Kara @ 2025-05-07 22:50 UTC (permalink / raw)
  To: Jeongjun Park; +Cc: akpm, urezki, edumazet, linux-mm, linux-kernel

Jeongjun Park <aha310510@gmail.com> wrote on Wed, 7 May 2025 at 17:32:
>
> The following data-race was found in show_numa_info():
>
> ...
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 3ed720a787ec..9139025e20e5 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -4914,28 +4914,32 @@ bool vmalloc_dump_obj(void *object)
>  #endif
>
>  #ifdef CONFIG_PROC_FS
> +
> +/*
> + * Print number of pages allocated on each memory node.
> + *
> + * This function can only be called if CONFIG_NUMA is enabled
> + * and VM_UNINITIALIZED bit in v->flags is disabled.
> + */
>  static void show_numa_info(struct seq_file *m, struct vm_struct *v)
>  {
> -       if (IS_ENABLED(CONFIG_NUMA)) {
> -               unsigned int nr, *counters = m->private;
> -               unsigned int step = 1U << vm_area_page_order(v);
> +       unsigned int nr, *counters;
> +       unsigned int step = 1U << vm_area_page_order(v);
>
> -               if (!counters)
> -                       return;
> +       counters = kcalloc(nr_node_ids, sizeof(unsigned int), GFP_ATOMIC);
> +       if (!counters)
> +               return;
>
> -               if (v->flags & VM_UNINITIALIZED)

Hello,

Although skipping memory blocks with the VM_UNINITIALIZED flag seems like
a good idea, it might also be worth checking that the memory area itself
is valid:

if (v && (v->flags & VM_UNINITIALIZED)) {
    continue;
}

> -                       return;
> -               /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
> -               smp_rmb();
> +       /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
> +       smp_rmb();
>
> -               memset(counters, 0, nr_node_ids * sizeof(unsigned int));
> +       for (nr = 0; nr < v->nr_pages; nr += step)
> +               counters[page_to_nid(v->pages[nr])] += step;
> +       for_each_node_state(nr, N_HIGH_MEMORY)
> +               if (counters[nr])
> +                       seq_printf(m, " N%u=%u", nr, counters[nr]);
>
> -               for (nr = 0; nr < v->nr_pages; nr += step)
> -                       counters[page_to_nid(v->pages[nr])] += step;
> -               for_each_node_state(nr, N_HIGH_MEMORY)
> -                       if (counters[nr])
> -                               seq_printf(m, " N%u=%u", nr, counters[nr]);
> -       }
> +       kfree(counters);
>  }
>
>  static void show_purge_info(struct seq_file *m)
> @@ -4979,6 +4983,8 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
>                         }
>
>                         v = va->vm;
> +                       if (v->flags & VM_UNINITIALIZED)
> +                               continue;
>
>                         seq_printf(m, "0x%pK-0x%pK %7ld",
>                                 v->addr, v->addr + v->size, v->size);
> @@ -5013,7 +5019,9 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
>                         if (is_vmalloc_addr(v->pages))
>                                 seq_puts(m, " vpages");
>
> -                       show_numa_info(m, v);
> +                       if (IS_ENABLED(CONFIG_NUMA))
> +                               show_numa_info(m, v);
> +
>                         seq_putc(m, '\n');
>                 }
>                 spin_unlock(&vn->busy.lock);
> @@ -5028,14 +5036,7 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
>
>  static int __init proc_vmalloc_init(void)
>  {
> -       void *priv_data = NULL;
> -
> -       if (IS_ENABLED(CONFIG_NUMA))
> -               priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
> -
> -       proc_create_single_data("vmallocinfo",
> -               0400, NULL, vmalloc_info_show, priv_data);
> -
> +       proc_create_single("vmallocinfo", 0400, NULL, vmalloc_info_show);

The proc_create_single() call is clean, but it no longer receives data like
priv_data, right? So if priv_data is needed again, this code will not work.
If using priv_data becomes necessary, a suitable memory allocation and
release mechanism should be added for it; otherwise a memory leak could
occur, and a matching kfree() for the kmalloc() would also need to be added:

proc_create_single_data("vmallocinfo", 0400, NULL, vmalloc_info_show,
priv_data);

// use kfree and free priv_data
kfree(priv_data);

Regards

Ozgur

>         return 0;
>  }
>  module_init(proc_vmalloc_init);
> --
>
>
>


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] mm/vmalloc: fix data race in show_numa_info()
  2025-05-07 22:33 ` Andrew Morton
@ 2025-05-08  4:47   ` Jeongjun Park
  2025-05-08  7:56     ` Ozgur Kara
  0 siblings, 1 reply; 7+ messages in thread
From: Jeongjun Park @ 2025-05-08  4:47 UTC (permalink / raw)
  To: Andrew Morton; +Cc: urezki, edumazet, linux-mm, linux-kernel

Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Wed,  7 May 2025 23:25:52 +0900 Jeongjun Park <aha310510@gmail.com> wrote:
>
> > The following data-race was found in show_numa_info():
> >
> > ...
> >
> >
> > According to this report, there is a read/write data race because m->private
> > is accessible from multiple CPUs. To fix this, instead of allocating the
> > counters buffer in proc_vmalloc_init() and passing its address to m->private,
> > show_numa_info() should allocate the buffer itself.
> >
> > One thing to note is that show_numa_info() is called inside a spinlock
> > critical section, so the buffer must be allocated with the GFP_ATOMIC flag.
>
> GFP_ATOMIC is unfortunate.  Can vmalloc_info_show() allocate the
> storage outside the lock and pass that pointer into show_numa_info()?
> That way will be more efficient also, less allocating and freeing.
>
>

That's a good idea! If we modify vmalloc_info_show() to allocate the
buffer before taking the spinlock and zero it at the beginning of each
loop iteration, we don't need GFP_ATOMIC, and we only need to allocate
the buffer once, which is much more efficient.

I'll send a v4 patch that reflects this right away.
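
Roughly, something like the sketch below is what I have in mind. This is
only an illustration -- the extra counters parameter, the GFP_KERNEL call
site and the memset placement are assumptions, not final v4 code:

static void show_numa_info(struct seq_file *m, struct vm_struct *v,
			   unsigned int *counters)
{
	unsigned int nr;
	unsigned int step = 1U << vm_area_page_order(v);

	/* Caller preallocates one counter per node; nothing to do without it. */
	if (!counters)
		return;

	/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
	smp_rmb();

	/* The buffer is reused for every vm_struct, so clear it each time. */
	memset(counters, 0, nr_node_ids * sizeof(unsigned int));

	for (nr = 0; nr < v->nr_pages; nr += step)
		counters[page_to_nid(v->pages[nr])] += step;
	for_each_node_state(nr, N_HIGH_MEMORY)
		if (counters[nr])
			seq_printf(m, " N%u=%u", nr, counters[nr]);
}

vmalloc_info_show() would then do

	counters = kcalloc(nr_node_ids, sizeof(unsigned int), GFP_KERNEL);

once, before the loop that takes vn->busy.lock, and kfree(counters) once
after it. The buffer stays private to a single read of /proc/vmallocinfo,
so the old sharing of m->private between readers does not come back.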

Regards,

Jeongjun Park


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] mm/vmalloc: fix data race in show_numa_info()
       [not found] ` <01100196acf1ede5-ae116361-04f2-4e8f-b7a4-7079d6158ffb-000000@eu-north-1.amazonses.com>
@ 2025-05-08  5:04   ` Jeongjun Park
  2025-05-08  6:18     ` Jeongjun Park
  0 siblings, 1 reply; 7+ messages in thread
From: Jeongjun Park @ 2025-05-08  5:04 UTC (permalink / raw)
  To: Ozgur Kara; +Cc: akpm, urezki, edumazet, linux-mm, linux-kernel

Ozgur Kara <ozgur@goosey.org> wrote:
>
> Jeongjun Park <aha310510@gmail.com> wrote on Wed, 7 May 2025 at 17:32:
> >
> > The following data-race was found in show_numa_info():
> >
> > ...
> >
> > -               if (v->flags & VM_UNINITIALIZED)
>
> Hello,
>
> > Although skipping memory blocks with the VM_UNINITIALIZED flag seems like
> > a good idea, it might also be worth checking that the memory area itself
> > is valid:
>
> if (v && (v->flags & VM_UNINITIALIZED)) {
>     continue;
> }
>

Thanks for the suggestion! It's not related to the data race, but it seems
like a good idea to add a check in case a null dereference occurs. I'll
reflect this in the v4 patch.

Regards,

Jeongjun Park

> > -                       return;
> > -               /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
> > -               smp_rmb();
> > +       /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
> > +       smp_rmb();
> >
> > -               memset(counters, 0, nr_node_ids * sizeof(unsigned int));
> > +       for (nr = 0; nr < v->nr_pages; nr += step)
> > +               counters[page_to_nid(v->pages[nr])] += step;
> > +       for_each_node_state(nr, N_HIGH_MEMORY)
> > +               if (counters[nr])
> > +                       seq_printf(m, " N%u=%u", nr, counters[nr]);
> >
> > -               for (nr = 0; nr < v->nr_pages; nr += step)
> > -                       counters[page_to_nid(v->pages[nr])] += step;
> > -               for_each_node_state(nr, N_HIGH_MEMORY)
> > -                       if (counters[nr])
> > -                               seq_printf(m, " N%u=%u", nr, counters[nr]);
> > -       }
> > +       kfree(counters);
> >  }
> >
> >  static void show_purge_info(struct seq_file *m)
> > @@ -4979,6 +4983,8 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
> >                         }
> >
> >                         v = va->vm;
> > +                       if (v->flags & VM_UNINITIALIZED)
> > +                               continue;
> >
> >                         seq_printf(m, "0x%pK-0x%pK %7ld",
> >                                 v->addr, v->addr + v->size, v->size);
> > @@ -5013,7 +5019,9 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
> >                         if (is_vmalloc_addr(v->pages))
> >                                 seq_puts(m, " vpages");
> >
> > -                       show_numa_info(m, v);
> > +                       if (IS_ENABLED(CONFIG_NUMA))
> > +                               show_numa_info(m, v);
> > +
> >                         seq_putc(m, '\n');
> >                 }
> >                 spin_unlock(&vn->busy.lock);
> > @@ -5028,14 +5036,7 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
> >
> >  static int __init proc_vmalloc_init(void)
> >  {
> > -       void *priv_data = NULL;
> > -
> > -       if (IS_ENABLED(CONFIG_NUMA))
> > -               priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
> > -
> > -       proc_create_single_data("vmallocinfo",
> > -               0400, NULL, vmalloc_info_show, priv_data);
> > -
> > +       proc_create_single("vmallocinfo", 0400, NULL, vmalloc_info_show);
>
> proc_create_single function clean but it no longer receives data like
> priv_data right? so if priv_data is needed again code will not work.
> if use priv_data becomes necessary, a suitable memory allocation and
> release mechanism should be added for this.
> otherwise a memory leak could occur and perhaps the use of kfree
> instead of kmalloc could also be added.
>
> proc_create_single_data("vmallocinfo", 0400, NULL, vmalloc_info_show,
> priv_data);
>
> // use kfree and free priv_data
> kfree(priv_data);
>
> Regards
>
> Ozgur
>
> >         return 0;
> >  }
> >  module_init(proc_vmalloc_init);
> > --
> >
> >
> >


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] mm/vmalloc: fix data race in show_numa_info()
  2025-05-08  5:04   ` Jeongjun Park
@ 2025-05-08  6:18     ` Jeongjun Park
  0 siblings, 0 replies; 7+ messages in thread
From: Jeongjun Park @ 2025-05-08  6:18 UTC (permalink / raw)
  To: Ozgur Kara; +Cc: akpm, urezki, edumazet, linux-mm, linux-kernel

Jeongjun Park <aha310510@gmail.com> wrote:
>
> Ozgur Kara <ozgur@goosey.org> wrote:
> >
> > Jeongjun Park <aha310510@gmail.com> wrote on Wed, 7 May 2025 at 17:32:
> > >
> > > The following data-race was found in show_numa_info():
> > >
> > > ...
> > >
> > > -               if (v->flags & VM_UNINITIALIZED)
> >
> > Hello,
> >
> > Although skipping memory blocks with the VM_UNINITIALIZED flag seems like
> > a good idea, it might also be worth checking that the memory area itself
> > is valid:
> >
> > if (v && (v->flags & VM_UNINITIALIZED)) {
> >     continue;
> > }
> >
>
> Thanks for the suggestion! It's not related to the data race, but it seems
> like a good idea to add a check in case a null dereference occurs. I'll
> reflect this in the v4 patch.
>
> Regards,
>
> Jeongjun Park
>

Oh, I misread the code. This function already checks if the va->vm
value is null, so there's no need to do this duplicate check.
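
For reference, the relevant part of the loop in vmalloc_info_show() has
roughly this shape (a paraphrase with details elided, not the exact
upstream code):

	if (!va->vm) {
		/* no vm_struct attached, e.g. a vm_map_ram area; skip it */
		...
		continue;
	}

	v = va->vm;
	if (v->flags & VM_UNINITIALIZED)
		continue;

so by the time show_numa_info() is reached, v cannot be NULL.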

Regards,

Jeongjun Park



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] mm/vmalloc: fix data race in show_numa_info()
  2025-05-08  4:47   ` Jeongjun Park
@ 2025-05-08  7:56     ` Ozgur Kara
  0 siblings, 0 replies; 7+ messages in thread
From: Ozgur Kara @ 2025-05-08  7:56 UTC (permalink / raw)
  To: Jeongjun Park; +Cc: Andrew Morton, urezki, edumazet, linux-mm, linux-kernel

Jeongjun Park <aha310510@gmail.com> wrote on Thu, 8 May 2025 at 07:47:
>
> Andrew Morton <akpm@linux-foundation.org> wrote:
> >
> > On Wed,  7 May 2025 23:25:52 +0900 Jeongjun Park <aha310510@gmail.com> wrote:
> >
> > > The following data-race was found in show_numa_info():
> > >
> > > ...
> >
> > GFP_ATOMIC is unfortunate.  Can vmalloc_info_show() allocate the
> > storage outside the lock and pass that pointer into show_numa_info()?
> > That way will be more efficient also, less allocating and freeing.
> >
> >
>
> That's a good idea! If we modify vmalloc_info_show() to allocate the
> buffer before taking the spinlock and zero it at the beginning of each
> loop iteration, we don't need GFP_ATOMIC, and we only need to allocate
> the buffer once, which is much more efficient.
>
> I'll send a v4 patch that reflects this right away.
>

Hello,

I think so, but I'm not sure it will work; I only suggested it as an idea
because we need to check whether v is NULL or not.

Regards,

Ozgur

> Regards,
>
> Jeongjun Park
>
>
>


^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2025-05-08  7:56 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-05-07 14:25 [PATCH v3] mm/vmalloc: fix data race in show_numa_info() Jeongjun Park
2025-05-07 22:33 ` Andrew Morton
2025-05-08  4:47   ` Jeongjun Park
2025-05-08  7:56     ` Ozgur Kara
2025-05-07 22:50 ` Ozgur Kara
     [not found] ` <01100196acf1ede5-ae116361-04f2-4e8f-b7a4-7079d6158ffb-000000@eu-north-1.amazonses.com>
2025-05-08  5:04   ` Jeongjun Park
2025-05-08  6:18     ` Jeongjun Park
