linux-mm.kvack.org archive mirror
* [PATCH v3] mm: add per-order mTHP swap-in fallback/fallback_charge counters
@ 2024-12-02 12:47 Wenchao Hao
  2024-12-02 22:44 ` Barry Song
  2024-12-06 11:21 ` Lance Yang
  0 siblings, 2 replies; 3+ messages in thread
From: Wenchao Hao @ 2024-12-02 12:47 UTC (permalink / raw)
  To: Jonathan Corbet, Andrew Morton, David Hildenbrand, Barry Song,
	Ryan Roberts, Baolin Wang, Usama Arif, Lance Yang,
	Matthew Wilcox, Peter Xu, linux-doc, linux-kernel, linux-mm
  Cc: Wenchao Hao

Currently, large folio swap-in is supported, but we lack a method to
analyze its success ratio. Similar to anon_fault_fallback, we introduce
per-order mTHP swpin_fallback and swpin_fallback_charge counters for
calculating the success ratio. The new counters are located at:

/sys/kernel/mm/transparent_hugepage/hugepages-<size>/stats/
	swpin_fallback
	swpin_fallback_charge
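
As an illustration only (not part of this patch), the following is a
minimal userspace sketch of computing the per-order swap-in success
ratio from these counters; the hugepages-64kB directory name is an
arbitrary example size, and swpin is the existing per-order success
counter:

  /* Illustrative sketch, not part of this patch: success ratio for
   * one mTHP size, computed as swpin / (swpin + swpin_fallback). */
  #include <stdio.h>

  static long read_counter(const char *dir, const char *name)
  {
  	char path[256];
  	long val = -1;
  	FILE *f;

  	snprintf(path, sizeof(path),
  		 "/sys/kernel/mm/transparent_hugepage/%s/stats/%s",
  		 dir, name);
  	f = fopen(path, "r");
  	if (!f)
  		return -1;
  	if (fscanf(f, "%ld", &val) != 1)
  		val = -1;
  	fclose(f);
  	return val;
  }

  int main(void)
  {
  	const char *dir = "hugepages-64kB";	/* example size */
  	long swpin = read_counter(dir, "swpin");
  	long fallback = read_counter(dir, "swpin_fallback");

  	if (swpin < 0 || fallback < 0)
  		return 1;
  	/* swpin counts whole-folio swap-in successes; swpin_fallback
  	 * counts attempts that fell back to a lower order. */
  	printf("%s swap-in success ratio: %.2f%%\n", dir,
  	       swpin + fallback ?
  	       100.0 * swpin / (swpin + fallback) : 0.0);
  	return 0;
  }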

Signed-off-by: Wenchao Hao <haowenchao22@gmail.com>
---
V3:
 Update the descriptions of swpin_fallback and swpin_fallback_charge

V2:
 Introduce swpin_fallback_charge, which is incremented when a huge page
 is allocated successfully but cannot be charged to the memory cgroup.

 Documentation/admin-guide/mm/transhuge.rst | 10 ++++++++++
 include/linux/huge_mm.h                    |  2 ++
 mm/huge_memory.c                           |  6 ++++++
 mm/memory.c                                |  2 ++
 4 files changed, 20 insertions(+)

diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 333958ef0d5f..156a03af0a88 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -591,6 +591,16 @@ swpin
 	is incremented every time a huge page is swapped in from a non-zswap
 	swap device in one piece.
 
+swpin_fallback
+	is incremented if swapin fails to allocate or charge a huge page
+	and instead falls back to using huge pages with lower orders or
+	small pages.
+
+swpin_fallback_charge
+	is incremented if swapin fails to charge a huge page and instead
+	falls back to using huge pages with lower orders or small pages
+	even though the allocation was successful.
+
 swpout
 	is incremented every time a huge page is swapped out to a non-zswap
 	swap device in one piece without splitting.
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index b94c2e8ee918..93e509b6c00e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -121,6 +121,8 @@ enum mthp_stat_item {
 	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
 	MTHP_STAT_ZSWPOUT,
 	MTHP_STAT_SWPIN,
+	MTHP_STAT_SWPIN_FALLBACK,
+	MTHP_STAT_SWPIN_FALLBACK_CHARGE,
 	MTHP_STAT_SWPOUT,
 	MTHP_STAT_SWPOUT_FALLBACK,
 	MTHP_STAT_SHMEM_ALLOC,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ab46ef718b44..d062b257376d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -617,6 +617,8 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
 DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
 DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN);
+DEFINE_MTHP_STAT_ATTR(swpin_fallback, MTHP_STAT_SWPIN_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(swpin_fallback_charge, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
 DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
 DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
 #ifdef CONFIG_SHMEM
@@ -637,6 +639,8 @@ static struct attribute *anon_stats_attrs[] = {
 #ifndef CONFIG_SHMEM
 	&zswpout_attr.attr,
 	&swpin_attr.attr,
+	&swpin_fallback_attr.attr,
+	&swpin_fallback_charge_attr.attr,
 	&swpout_attr.attr,
 	&swpout_fallback_attr.attr,
 #endif
@@ -669,6 +673,8 @@ static struct attribute *any_stats_attrs[] = {
 #ifdef CONFIG_SHMEM
 	&zswpout_attr.attr,
 	&swpin_attr.attr,
+	&swpin_fallback_attr.attr,
+	&swpin_fallback_charge_attr.attr,
 	&swpout_attr.attr,
 	&swpout_fallback_attr.attr,
 #endif
diff --git a/mm/memory.c b/mm/memory.c
index d5a1b0a6bf1f..a44547600c02 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4189,8 +4189,10 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
 			if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
 							    gfp, entry))
 				return folio;
+			count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
 			folio_put(folio);
 		}
+		count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
 		order = next_order(&orders, order);
 	}
 
-- 
2.45.0

* Re: [PATCH v3] mm: add per-order mTHP swap-in fallback/fallback_charge counters
  2024-12-02 12:47 [PATCH v3] mm: add per-order mTHP swap-in fallback/fallback_charge counters Wenchao Hao
@ 2024-12-02 22:44 ` Barry Song
  2024-12-06 11:21 ` Lance Yang
  1 sibling, 0 replies; 3+ messages in thread
From: Barry Song @ 2024-12-02 22:44 UTC (permalink / raw)
  To: Wenchao Hao
  Cc: Jonathan Corbet, Andrew Morton, David Hildenbrand, Ryan Roberts,
	Baolin Wang, Usama Arif, Lance Yang, Matthew Wilcox, Peter Xu,
	linux-doc, linux-kernel, linux-mm

On Tue, Dec 3, 2024 at 1:47 AM Wenchao Hao <haowenchao22@gmail.com> wrote:
>
> Currently, large folio swap-in is supported, but we lack a method to
> analyze its success ratio. Similar to anon_fault_fallback, we introduce
> per-order mTHP swpin_fallback and swpin_fallback_charge counters for
> calculating the success ratio. The new counters are located at:
>
> /sys/kernel/mm/transparent_hugepage/hugepages-<size>/stats/
>         swpin_fallback
>         swpin_fallback_charge
>
> Signed-off-by: Wenchao Hao <haowenchao22@gmail.com>

LGTM, thanks!

Reviewed-by: Barry Song <baohua@kernel.org>

* Re: [PATCH v3] mm: add per-order mTHP swap-in fallback/fallback_charge counters
  2024-12-02 12:47 [PATCH v3] mm: add per-order mTHP swap-in fallback/fallback_charge counters Wenchao Hao
  2024-12-02 22:44 ` Barry Song
@ 2024-12-06 11:21 ` Lance Yang
  1 sibling, 0 replies; 3+ messages in thread
From: Lance Yang @ 2024-12-06 11:21 UTC (permalink / raw)
  To: Wenchao Hao
  Cc: Jonathan Corbet, Andrew Morton, David Hildenbrand, Barry Song,
	Ryan Roberts, Baolin Wang, Usama Arif, Matthew Wilcox, Peter Xu,
	linux-doc, linux-kernel, linux-mm

On Mon, Dec 2, 2024 at 8:47 PM Wenchao Hao <haowenchao22@gmail.com> wrote:
>
> Currently, large folio swap-in is supported, but we lack a method to
> analyze its success ratio. Similar to anon_fault_fallback, we introduce
> per-order mTHP swpin_fallback and swpin_fallback_charge counters for
> calculating the success ratio. The new counters are located at:
>
> /sys/kernel/mm/transparent_hugepage/hugepages-<size>/stats/
>         swpin_fallback
>         swpin_fallback_charge
>
> Signed-off-by: Wenchao Hao <haowenchao22@gmail.com>

LGTM. Feel free to add:

Reviewed-by: Lance Yang <ioworker0@gmail.com>

Thanks,
Lance
