[PATCH] mm/khugepaged: make reserved memory adaptive
From: wolfgang huang @ 2023-09-02  5:26 UTC
  To: akpm; +Cc: linux-mm, wolfgang huang

From: wolfgang huang <huangjinhui@kylinos.cn>

With the 64K page size configuration on ARM64, the THP size is 512MB,
and the recommended watermark typically reserves almost 5% of memory.
However, THP is rarely collapsed in practice, especially under the
madvise policy, so a large amount of memory stays reserved for THP
without ever being used, which wastes a lot of memory.
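
(As a back-of-the-envelope check on those two figures, a minimal
userspace sketch, assuming the usual arm64 64K-page constants
(PAGE_SHIFT 16, PMD_SHIFT 29) and the /20 cap applied by
set_recommended_min_free_kbytes(); the numbers are illustrative,
not taken from this patch:)

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 1UL << 16;    /* 64K base pages */
	unsigned int hpage_pmd_order = 29 - 16; /* PMD_SHIFT - PAGE_SHIFT */

	/* One PMD maps 8192 64K PTEs, so a PMD-sized THP is 512MB. */
	printf("THP size: %lu MB\n", (page_size << hpage_pmd_order) >> 20);

	/* set_recommended_min_free_kbytes() asks for roughly
	 * pageblock_nr_pages * nr_zones * 2 pages plus migratetype
	 * slack, but caps the result at nr_free_buffer_pages() / 20,
	 * i.e. ~5% of memory; with 512MB pageblocks the cap is
	 * effectively always hit. */
	return 0;
}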

So this patch proposes a dynamic method of adjusting the reserved
memory: while khugepaged is scanning and trying to collapse pages into
THP, the reservation is raised, and while the system has no THP
collapse work to do, the reservation is kept low. This saves a lot of
memory in workloads that never collapse THP, yet still raises the
reservation when THP collapse is actually needed. The effect is most
pronounced under the madvise policy.
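
(For reference, the "has work" condition this keys off is the existing
helper below; a sketch from memory of its current form in
mm/khugepaged.c, so treat the exact body as approximate:)

/* khugepaged has work only while some mm is queued for scanning,
 * which is what lets the reservation drop again once the queue
 * drains. */
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		hugepage_flags_enabled();
}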

Signed-off-by: wolfgang huang <huangjinhui@kylinos.cn>
---
 mm/khugepaged.c | 36 +++++++++++++++++++++++++++++++++++-
 1 file changed, 35 insertions(+), 1 deletion(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 88433cc25d8a..41a837e618a5 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -129,6 +129,10 @@ static struct khugepaged_scan khugepaged_scan = {
 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 };
 
+/* whether khugepaged is scanning to collapse into THP; default false */
+static bool khugepaged_thp_scan_state;
+static void set_recommended_min_free_kbytes(void);
+
 #ifdef CONFIG_SYSFS
 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
 					 struct kobj_attribute *attr,
@@ -2549,6 +2553,33 @@ static void khugepaged_wait_work(void)
 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
 }
 
+static bool khugepaged_threshold(void)
+{
+	/* thp size threshold check */
+	if ((PAGE_SIZE << HPAGE_PMD_ORDER) >= SZ_512M)
+		return true;
+	return false;
+}
+
+static void khugepaged_update_wmarks(void)
+{
+	if (!khugepaged_threshold())
+		return;
+
+	/* __khugepaged_enter pushed khugepaged to work; raise the watermark */
+	if (khugepaged_has_work()) {
+		/* Once set, do not disturb the watermark again */
+		if (!khugepaged_thp_scan_state) {
+			khugepaged_thp_scan_state = true;
+			mutex_lock(&khugepaged_mutex);
+			set_recommended_min_free_kbytes();
+			mutex_unlock(&khugepaged_mutex);
+		}
+	} else {
+		khugepaged_thp_scan_state = false;
+	}
+}
+
 static int khugepaged(void *none)
 {
 	struct khugepaged_mm_slot *mm_slot;
@@ -2559,6 +2590,7 @@ static int khugepaged(void *none)
 	while (!kthread_should_stop()) {
 		khugepaged_do_scan(&khugepaged_collapse_control);
 		khugepaged_wait_work();
+		khugepaged_update_wmarks();
 	}
 
 	spin_lock(&khugepaged_mm_lock);
@@ -2576,7 +2608,9 @@ static void set_recommended_min_free_kbytes(void)
 	int nr_zones = 0;
 	unsigned long recommended_min;
 
-	if (!hugepage_flags_enabled()) {
+	if (!hugepage_flags_enabled() ||
+		(khugepaged_threshold() &&
+		 !khugepaged_thp_scan_state)) {
 		calculate_min_free_kbytes();
 		goto update_wmarks;
 	}
-- 
2.34.1




Re: [PATCH] mm/khugepaged: make reserved memory adaptive
From: Yang Shi @ 2023-09-05 22:10 UTC
  To: wolfgang huang; +Cc: akpm, linux-mm, wolfgang huang

On Fri, Sep 1, 2023 at 10:26 PM wolfgang huang <wolfgang9277@126.com> wrote:
>
> From: wolfgang huang <huangjinhui@kylinos.cn>
>
> With the 64K page size configuration on ARM64, the THP size is 512MB,
> and the recommended watermark typically reserves almost 5% of memory.
> However, THP is rarely collapsed in practice, especially under the
> madvise policy, so a large amount of memory stays reserved for THP
> without ever being used, which wastes a lot of memory.
>
> So this patch proposes a dynamic method of adjusting the reserved
> memory: while khugepaged is scanning and trying to collapse pages into
> THP, the reservation is raised, and while the system has no THP
> collapse work to do, the reservation is kept low. This saves a lot of
> memory in workloads that never collapse THP, yet still raises the
> reservation when THP collapse is actually needed. The effect is most
> pronounced under the madvise policy.
>
> Signed-off-by: wolfgang huang <huangjinhui@kylinos.cn>

Thanks for the patch. There was already a thread about this problem;
please refer to:
https://lore.kernel.org/linux-mm/20230817035155.84230-1-liusong@linux.alibaba.com/

> ---
>  mm/khugepaged.c | 36 +++++++++++++++++++++++++++++++++++-
>  1 file changed, 35 insertions(+), 1 deletion(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 88433cc25d8a..41a837e618a5 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -129,6 +129,10 @@ static struct khugepaged_scan khugepaged_scan = {
>         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
>  };
>
> +/* whether khugepaged is scanning to collapse into THP; default false */
> +static bool khugepaged_thp_scan_state;
> +static void set_recommended_min_free_kbytes(void);
> +
>  #ifdef CONFIG_SYSFS
>  static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
>                                          struct kobj_attribute *attr,
> @@ -2549,6 +2553,33 @@ static void khugepaged_wait_work(void)
>                 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
>  }
>
> +static bool khugepaged_threshold(void)
> +{
> +       /* thp size threshold check */
> +       if ((PAGE_SIZE << HPAGE_PMD_ORDER) >= SZ_512M)
> +               return true;
> +       return false;
> +}
> +
> +static void khugepaged_update_wmarks(void)
> +{
> +       if (!khugepaged_threshold())
> +               return;
> +
> +       /* __khugepaged_enter pushed khugepaged to work; raise the watermark */
> +       if (khugepaged_has_work()) {
> +               /* Once set, do not disturb the watermark again */
> +               if (!khugepaged_thp_scan_state) {
> +                       khugepaged_thp_scan_state = true;
> +                       mutex_lock(&khugepaged_mutex);
> +                       set_recommended_min_free_kbytes();
> +                       mutex_unlock(&khugepaged_mutex);
> +               }
> +       } else {
> +               khugepaged_thp_scan_state = false;
> +       }
> +}
> +
>  static int khugepaged(void *none)
>  {
>         struct khugepaged_mm_slot *mm_slot;
> @@ -2559,6 +2590,7 @@ static int khugepaged(void *none)
>         while (!kthread_should_stop()) {
>                 khugepaged_do_scan(&khugepaged_collapse_control);
>                 khugepaged_wait_work();
> +               khugepaged_update_wmarks();
>         }
>
>         spin_lock(&khugepaged_mm_lock);
> @@ -2576,7 +2608,9 @@ static void set_recommended_min_free_kbytes(void)
>         int nr_zones = 0;
>         unsigned long recommended_min;
>
> -       if (!hugepage_flags_enabled()) {
> +       if (!hugepage_flags_enabled() ||
> +               (khugepaged_threshold() &&
> +                !khugepaged_thp_scan_state)) {
>                 calculate_min_free_kbytes();
>                 goto update_wmarks;
>         }
> --
> 2.34.1
>
>

