* [PATCH v2] mm: compaction: refactor compact_node()
@ 2024-02-08 1:36 Kefeng Wang
2024-02-19 6:40 ` Baolin Wang
0 siblings, 1 reply; 2+ messages in thread
From: Kefeng Wang @ 2024-02-08 1:36 UTC (permalink / raw)
To: Andrew Morton, linux-mm; +Cc: Kefeng Wang
Refactor compact_node() to handle both proactive and synchronous compaction
of memory, which cleans up the code a bit.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
v2:
- drop proactive_compact_node() and add comments for compact_node()
suggested by Andrew
mm/compaction.c | 65 ++++++++++++++++---------------------------------
1 file changed, 21 insertions(+), 44 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index e63a4ee7e029..de882ecb61c5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2885,25 +2885,27 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
}
/*
- * Compact all zones within a node till each zone's fragmentation score
- * reaches within proactive compaction thresholds (as determined by the
- * proactiveness tunable).
+ * compact_node() - compact all zones within a node
+ * @pgdat: The node page data
+ * @proactive: Whether the compaction is proactive
*
- * It is possible that the function returns before reaching score targets
- * due to various back-off conditions, such as, contention on per-node or
- * per-zone locks.
+ * For proactive compaction, compact till each zone's fragmentation score
+ * reaches within proactive compaction thresholds (as determined by the
+ * proactiveness tunable), it is possible that the function returns before
+ * reaching score targets due to various back-off conditions, such as,
+ * contention on per-node or per-zone locks.
*/
-static void proactive_compact_node(pg_data_t *pgdat)
+static void compact_node(pg_data_t *pgdat, bool proactive)
{
int zoneid;
struct zone *zone;
struct compact_control cc = {
.order = -1,
- .mode = MIGRATE_SYNC_LIGHT,
+ .mode = proactive ? MIGRATE_SYNC_LIGHT : MIGRATE_SYNC,
.ignore_skip_hint = true,
.whole_zone = true,
.gfp_mask = GFP_KERNEL,
- .proactive_compaction = true,
+ .proactive_compaction = proactive,
};
for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
@@ -2915,41 +2917,16 @@ static void proactive_compact_node(pg_data_t *pgdat)
compact_zone(&cc, NULL);
- count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
- cc.total_migrate_scanned);
- count_compact_events(KCOMPACTD_FREE_SCANNED,
- cc.total_free_scanned);
- }
-}
-
-/* Compact all zones within a node */
-static void compact_node(int nid)
-{
- pg_data_t *pgdat = NODE_DATA(nid);
- int zoneid;
- struct zone *zone;
- struct compact_control cc = {
- .order = -1,
- .mode = MIGRATE_SYNC,
- .ignore_skip_hint = true,
- .whole_zone = true,
- .gfp_mask = GFP_KERNEL,
- };
-
-
- for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
-
- zone = &pgdat->node_zones[zoneid];
- if (!populated_zone(zone))
- continue;
-
- cc.zone = zone;
-
- compact_zone(&cc, NULL);
+ if (proactive) {
+ count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
+ cc.total_migrate_scanned);
+ count_compact_events(KCOMPACTD_FREE_SCANNED,
+ cc.total_free_scanned);
+ }
}
}
-/* Compact all nodes in the system */
+/* Compact all zones of all nodes in the system */
static void compact_nodes(void)
{
int nid;
@@ -2958,7 +2935,7 @@ static void compact_nodes(void)
lru_add_drain_all();
for_each_online_node(nid)
- compact_node(nid);
+ compact_node(NODE_DATA(nid), false);
}
static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
@@ -3020,7 +2997,7 @@ static ssize_t compact_store(struct device *dev,
/* Flush pending updates to the LRU lists */
lru_add_drain_all();
- compact_node(nid);
+ compact_node(NODE_DATA(nid), false);
}
return count;
@@ -3229,7 +3206,7 @@ static int kcompactd(void *p)
unsigned int prev_score, score;
prev_score = fragmentation_score_node(pgdat);
- proactive_compact_node(pgdat);
+ compact_node(pgdat, true);
score = fragmentation_score_node(pgdat);
/*
* Defer proactive compaction if the fragmentation
--
2.27.0
^ permalink raw reply [flat|nested] 2+ messages in thread
* Re: [PATCH v2] mm: compaction: refactor compact_node()
2024-02-08 1:36 [PATCH v2] mm: compaction: refactor compact_node() Kefeng Wang
@ 2024-02-19 6:40 ` Baolin Wang
0 siblings, 0 replies; 2+ messages in thread
From: Baolin Wang @ 2024-02-19 6:40 UTC (permalink / raw)
To: Kefeng Wang, Andrew Morton, linux-mm
On 2024/2/8 09:36, Kefeng Wang wrote:
> Refactor compact_node() to handle both proactive and synchronous compaction
> of memory, which cleans up the code a bit.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
LGTM.
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
> v2:
> - drop proactive_compact_node() and add comments for compact_node()
> suggested by Andrew
>
> mm/compaction.c | 65 ++++++++++++++++---------------------------------
> 1 file changed, 21 insertions(+), 44 deletions(-)
>
> diff --git a/mm/compaction.c b/mm/compaction.c
> index e63a4ee7e029..de882ecb61c5 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -2885,25 +2885,27 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
> }
>
> /*
> - * Compact all zones within a node till each zone's fragmentation score
> - * reaches within proactive compaction thresholds (as determined by the
> - * proactiveness tunable).
> + * compact_node() - compact all zones within a node
> + * @pgdat: The node page data
> + * @proactive: Whether the compaction is proactive
> *
> - * It is possible that the function returns before reaching score targets
> - * due to various back-off conditions, such as, contention on per-node or
> - * per-zone locks.
> + * For proactive compaction, compact till each zone's fragmentation score
> + * reaches within proactive compaction thresholds (as determined by the
> + * proactiveness tunable), it is possible that the function returns before
> + * reaching score targets due to various back-off conditions, such as,
> + * contention on per-node or per-zone locks.
> */
> -static void proactive_compact_node(pg_data_t *pgdat)
> +static void compact_node(pg_data_t *pgdat, bool proactive)
> {
> int zoneid;
> struct zone *zone;
> struct compact_control cc = {
> .order = -1,
> - .mode = MIGRATE_SYNC_LIGHT,
> + .mode = proactive ? MIGRATE_SYNC_LIGHT : MIGRATE_SYNC,
> .ignore_skip_hint = true,
> .whole_zone = true,
> .gfp_mask = GFP_KERNEL,
> - .proactive_compaction = true,
> + .proactive_compaction = proactive,
> };
>
> for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
> @@ -2915,41 +2917,16 @@ static void proactive_compact_node(pg_data_t *pgdat)
>
> compact_zone(&cc, NULL);
>
> - count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
> - cc.total_migrate_scanned);
> - count_compact_events(KCOMPACTD_FREE_SCANNED,
> - cc.total_free_scanned);
> - }
> -}
> -
> -/* Compact all zones within a node */
> -static void compact_node(int nid)
> -{
> - pg_data_t *pgdat = NODE_DATA(nid);
> - int zoneid;
> - struct zone *zone;
> - struct compact_control cc = {
> - .order = -1,
> - .mode = MIGRATE_SYNC,
> - .ignore_skip_hint = true,
> - .whole_zone = true,
> - .gfp_mask = GFP_KERNEL,
> - };
> -
> -
> - for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
> -
> - zone = &pgdat->node_zones[zoneid];
> - if (!populated_zone(zone))
> - continue;
> -
> - cc.zone = zone;
> -
> - compact_zone(&cc, NULL);
> + if (proactive) {
> + count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
> + cc.total_migrate_scanned);
> + count_compact_events(KCOMPACTD_FREE_SCANNED,
> + cc.total_free_scanned);
> + }
> }
> }
>
> -/* Compact all nodes in the system */
> +/* Compact all zones of all nodes in the system */
> static void compact_nodes(void)
> {
> int nid;
> @@ -2958,7 +2935,7 @@ static void compact_nodes(void)
> lru_add_drain_all();
>
> for_each_online_node(nid)
> - compact_node(nid);
> + compact_node(NODE_DATA(nid), false);
> }
>
> static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
> @@ -3020,7 +2997,7 @@ static ssize_t compact_store(struct device *dev,
> /* Flush pending updates to the LRU lists */
> lru_add_drain_all();
>
> - compact_node(nid);
> + compact_node(NODE_DATA(nid), false);
> }
>
> return count;
> @@ -3229,7 +3206,7 @@ static int kcompactd(void *p)
> unsigned int prev_score, score;
>
> prev_score = fragmentation_score_node(pgdat);
> - proactive_compact_node(pgdat);
> + compact_node(pgdat, true);
> score = fragmentation_score_node(pgdat);
> /*
> * Defer proactive compaction if the fragmentation
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2024-02-19 6:41 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-02-08 1:36 [PATCH v2] mm: compaction: refactor compact_node() Kefeng Wang
2024-02-19 6:40 ` Baolin Wang
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox