linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Vlastimil Babka <vbabka@suse.cz>
To: Johannes Weiner <hannes@cmpxchg.org>,
	Andrew Morton <akpm@linux-foundation.org>
Cc: Mel Gorman <mgorman@techsingularity.net>,
	Michal Hocko <mhocko@suse.com>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	kernel-team@fb.com
Subject: Re: [PATCH 3/5] mm: compaction: refactor __compaction_suitable()
Date: Mon, 29 May 2023 19:11:35 +0200	[thread overview]
Message-ID: <5f3ad5f3-780d-1ff7-5f97-0dc8b5611581@suse.cz> (raw)
In-Reply-To: <20230519123959.77335-4-hannes@cmpxchg.org>

On 5/19/23 14:39, Johannes Weiner wrote:
> __compaction_suitable() is supposed to check for available migration
> targets. However, it also checks whether the operation was requested
> via /proc/sys/vm/compact_memory, and whether the original allocation
> request can already succeed. These don't apply to all callsites.
> 
> Move the checks out to the callers, so that later patches can deal
> with them one by one. No functional change intended.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Note the comment on compaction_suitable() still mentions COMPACT_SUCCESS, which
is no longer possible, so delete that line?
Also on closer look, both compaction_suitable() and __compaction_suitable()
could now simply return bool. The callers use it that way anyway. There
would just be some extra fiddling internally around the tracepoint.

> ---
>  include/linux/compaction.h |  4 +-
>  mm/compaction.c            | 78 ++++++++++++++++++++++++--------------
>  mm/vmscan.c                | 35 ++++++++++-------
>  3 files changed, 73 insertions(+), 44 deletions(-)
> 
> diff --git a/include/linux/compaction.h b/include/linux/compaction.h
> index 1f0328a2ba48..9f7cf3e1bf89 100644
> --- a/include/linux/compaction.h
> +++ b/include/linux/compaction.h
> @@ -90,7 +90,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
>  		struct page **page);
>  extern void reset_isolation_suitable(pg_data_t *pgdat);
>  extern enum compact_result compaction_suitable(struct zone *zone, int order,
> -		unsigned int alloc_flags, int highest_zoneidx);
> +					       int highest_zoneidx);
>  
>  extern void compaction_defer_reset(struct zone *zone, int order,
>  				bool alloc_success);
> @@ -108,7 +108,7 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
>  }
>  
>  static inline enum compact_result compaction_suitable(struct zone *zone, int order,
> -					int alloc_flags, int highest_zoneidx)
> +						      int highest_zoneidx)
>  {
>  	return COMPACT_SKIPPED;
>  }
> diff --git a/mm/compaction.c b/mm/compaction.c
> index c9a4b6dffcf2..8f61cfa87c49 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -2206,24 +2206,10 @@ static enum compact_result compact_finished(struct compact_control *cc)
>  }
>  
>  static enum compact_result __compaction_suitable(struct zone *zone, int order,
> -					unsigned int alloc_flags,
>  					int highest_zoneidx,
>  					unsigned long wmark_target)
>  {
>  	unsigned long watermark;
> -
> -	if (is_via_compact_memory(order))
> -		return COMPACT_CONTINUE;
> -
> -	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
> -	/*
> -	 * If watermarks for high-order allocation are already met, there
> -	 * should be no need for compaction at all.
> -	 */
> -	if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
> -								alloc_flags))
> -		return COMPACT_SUCCESS;
> -
>  	/*
>  	 * Watermarks for order-0 must be met for compaction to be able to
>  	 * isolate free pages for migration targets. This means that the
> @@ -2256,13 +2242,12 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
>   *   COMPACT_CONTINUE - If compaction should run now
>   */
>  enum compact_result compaction_suitable(struct zone *zone, int order,
> -					unsigned int alloc_flags,
>  					int highest_zoneidx)
>  {
>  	enum compact_result ret;
>  	int fragindex;
>  
> -	ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx,
> +	ret = __compaction_suitable(zone, order, highest_zoneidx,
>  				    zone_page_state(zone, NR_FREE_PAGES));
>  	/*
>  	 * fragmentation index determines if allocation failures are due to
> @@ -2306,7 +2291,16 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
>  	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
>  				ac->highest_zoneidx, ac->nodemask) {
>  		unsigned long available;
> -		enum compact_result compact_result;
> +		unsigned long watermark;
> +
> +		if (is_via_compact_memory(order))
> +			return true;
> +
> +		/* Allocation can already succeed, nothing to do */
> +		watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
> +		if (zone_watermark_ok(zone, order, watermark,
> +				      ac->highest_zoneidx, alloc_flags))
> +			continue;
>  
>  		/*
>  		 * Do not consider all the reclaimable memory because we do not
> @@ -2316,9 +2310,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
>  		 */
>  		available = zone_reclaimable_pages(zone) / order;
>  		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
> -		compact_result = __compaction_suitable(zone, order, alloc_flags,
> -				ac->highest_zoneidx, available);
> -		if (compact_result == COMPACT_CONTINUE)
> +		if (__compaction_suitable(zone, order, ac->highest_zoneidx,
> +					  available) == COMPACT_CONTINUE)
>  			return true;
>  	}
>  
> @@ -2348,11 +2341,23 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
>  	INIT_LIST_HEAD(&cc->migratepages);
>  
>  	cc->migratetype = gfp_migratetype(cc->gfp_mask);
> -	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
> -							cc->highest_zoneidx);
> -	/* Compaction is likely to fail */
> -	if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
> -		return ret;
> +
> +	if (!is_via_compact_memory(cc->order)) {
> +		unsigned long watermark;
> +
> +		/* Allocation can already succeed, nothing to do */
> +		watermark = wmark_pages(cc->zone,
> +					cc->alloc_flags & ALLOC_WMARK_MASK);
> +		if (zone_watermark_ok(cc->zone, cc->order, watermark,
> +				      cc->highest_zoneidx, cc->alloc_flags))
> +			return COMPACT_SUCCESS;
> +
> +		ret = compaction_suitable(cc->zone, cc->order,
> +					  cc->highest_zoneidx);
> +		/* Compaction is likely to fail */
> +		if (ret == COMPACT_SKIPPED)
> +			return ret;
> +	}
>  
>  	/*
>  	 * Clear pageblock skip if there were failures recently and compaction
> @@ -2845,7 +2850,16 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
>  		if (!populated_zone(zone))
>  			continue;
>  
> -		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
> +		if (is_via_compact_memory(pgdat->kcompactd_max_order))
> +			return true;
> +
> +		/* Allocation can already succeed, check other zones */
> +		if (zone_watermark_ok(zone, pgdat->kcompactd_max_order,
> +				      min_wmark_pages(zone),
> +				      highest_zoneidx, 0))
> +			continue;
> +
> +		if (compaction_suitable(zone, pgdat->kcompactd_max_order,
>  					highest_zoneidx) == COMPACT_CONTINUE)
>  			return true;
>  	}
> @@ -2883,10 +2897,18 @@ static void kcompactd_do_work(pg_data_t *pgdat)
>  		if (compaction_deferred(zone, cc.order))
>  			continue;
>  
> -		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
> -							COMPACT_CONTINUE)
> +		if (is_via_compact_memory(cc.order))
> +			goto compact;
> +
> +		/* Allocation can already succeed, nothing to do */
> +		if (zone_watermark_ok(zone, cc.order,
> +				      min_wmark_pages(zone), zoneid, 0))
>  			continue;
>  
> +		if (compaction_suitable(zone, cc.order,
> +					zoneid) != COMPACT_CONTINUE)
> +			continue;
> +compact:
>  		if (kthread_should_stop())
>  			return;
>  
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index d257916f39e5..c9c0f3e081f5 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -6397,14 +6397,17 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
>  		if (!managed_zone(zone))
>  			continue;
>  
> -		switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
> -		case COMPACT_SUCCESS:
> -		case COMPACT_CONTINUE:
> +		if (sc->order == -1) /* is_via_compact_memory() */
> +			return false;
> +
> +		/* Allocation can already succeed, nothing to do */
> +		if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
> +				      sc->reclaim_idx, 0))
> +			return false;
> +
> +		if (compaction_suitable(zone, sc->order,
> +					sc->reclaim_idx) == COMPACT_CONTINUE)
>  			return false;
> -		default:
> -			/* check next zone */
> -			;
> -		}
>  	}
>  
>  	/*
> @@ -6592,16 +6595,20 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
>  static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
>  {
>  	unsigned long watermark;
> -	enum compact_result suitable;
>  
> -	suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
> -	if (suitable == COMPACT_SUCCESS)
> -		/* Allocation should succeed already. Don't reclaim. */
> +	if (sc->order == -1) /* is_via_compact_memory() */
> +		goto suitable;
> +
> +	/* Allocation can already succeed, nothing to do */
> +	if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
> +			      sc->reclaim_idx, 0))
>  		return true;
> -	if (suitable == COMPACT_SKIPPED)
> -		/* Compaction cannot yet proceed. Do reclaim. */
> -		return false;
>  
> +	/* Compaction cannot yet proceed. Do reclaim. */
> +	if (compaction_suitable(zone, sc->order,
> +				sc->reclaim_idx) == COMPACT_SKIPPED)
> +		return false;
> +suitable:
>  	/*
>  	 * Compaction is already possible, but it takes time to run and there
>  	 * are potentially other callers using the pages just freed. So proceed



  reply	other threads:[~2023-05-29 17:11 UTC|newest]

Thread overview: 12+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-05-19 12:39 [PATCH 0/5] mm: compaction: cleanups & simplifications Johannes Weiner
2023-05-19 12:39 ` [PATCH 1/5] mm: compaction: remove compaction result helpers Johannes Weiner
2023-05-29  9:58   ` Vlastimil Babka
2023-05-19 12:39 ` [PATCH 2/5] mm: compaction: simplify should_compact_retry() Johannes Weiner
2023-05-29 13:03   ` Vlastimil Babka
2023-05-29 16:38     ` Johannes Weiner
2023-05-19 12:39 ` [PATCH 3/5] mm: compaction: refactor __compaction_suitable() Johannes Weiner
2023-05-29 17:11   ` Vlastimil Babka [this message]
2023-05-19 12:39 ` [PATCH 4/5] mm: compaction: remove unnecessary is_via_compact_memory() checks Johannes Weiner
2023-05-29 17:12   ` Vlastimil Babka
2023-05-19 12:39 ` [PATCH 5/5] mm: compaction: drop redundant watermark check in compaction_zonelist_suitable() Johannes Weiner
2023-05-29 17:12   ` Vlastimil Babka

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=5f3ad5f3-780d-1ff7-5f97-0dc8b5611581@suse.cz \
    --to=vbabka@suse.cz \
    --cc=akpm@linux-foundation.org \
    --cc=hannes@cmpxchg.org \
    --cc=kernel-team@fb.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mgorman@techsingularity.net \
    --cc=mhocko@suse.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox