From: Michal Hocko <mhocko@kernel.org>
To: qiwuchen55@gmail.com
Cc: akpm@linux-foundation.org, linux-mm@kvack.org,
chenqiwu <chenqiwu@xiaomi.com>
Subject: Re: [PATCH] mm/vmscan: fix incorrect return type for cgroup_reclaim()
Date: Thu, 19 Mar 2020 17:07:07 +0100 [thread overview]
Message-ID: <20200319160707.GG20800@dhcp22.suse.cz> (raw)
In-Reply-To: <1584633026-26288-1-git-send-email-qiwuchen55@gmail.com>
On Thu 19-03-20 23:50:26, qiwuchen55@gmail.com wrote:
> From: chenqiwu <chenqiwu@xiaomi.com>
>
> The return type of cgroup_reclaim() is bool, but the correct type
> should be struct mem_cgroup pointer. As a result, cgroup_reclaim()
> can be used to wrap sc->target_mem_cgroup in vmscan code.
Why is this an improvement? While we can hide the scan_control
dereference, I am not seeing why this would be so much better.
Quite worse TBH, because cgroup_reclaim is a predicate while you make it
return an object. This might be a highly subjective thing, though.
Does the patch result in better code generation at least? Without any
obvious improvement I am not sure this is worth inclusion.
> Signed-off-by: chenqiwu <chenqiwu@xiaomi.com>
> ---
> mm/vmscan.c | 39 +++++++++++++++++++++------------------
> 1 file changed, 21 insertions(+), 18 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index dca623d..c795fc3 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -238,7 +238,7 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
> up_write(&shrinker_rwsem);
> }
>
> -static bool cgroup_reclaim(struct scan_control *sc)
> +static struct mem_cgroup *cgroup_reclaim(struct scan_control *sc)
> {
> return sc->target_mem_cgroup;
> }
> @@ -276,9 +276,9 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
> {
> }
>
> -static bool cgroup_reclaim(struct scan_control *sc)
> +static struct mem_cgroup *cgroup_reclaim(struct scan_control *sc)
> {
> - return false;
> + return NULL;
> }
>
> static bool writeback_throttling_sane(struct scan_control *sc)
> @@ -984,7 +984,7 @@ static enum page_references page_check_references(struct page *page,
> int referenced_ptes, referenced_page;
> unsigned long vm_flags;
>
> - referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
> + referenced_ptes = page_referenced(page, 1, cgroup_reclaim(sc),
> &vm_flags);
> referenced_page = TestClearPageReferenced(page);
>
> @@ -1422,7 +1422,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
> count_vm_event(PGLAZYFREED);
> count_memcg_page_event(page, PGLAZYFREED);
> } else if (!mapping || !__remove_mapping(mapping, page, true,
> - sc->target_mem_cgroup))
> + cgroup_reclaim(sc)))
> goto keep_locked;
>
> unlock_page(page);
> @@ -1907,6 +1907,7 @@ static int current_may_throttle(void)
> enum vm_event_item item;
> struct pglist_data *pgdat = lruvec_pgdat(lruvec);
> struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
> + struct mem_cgroup *target_memcg = cgroup_reclaim(sc);
> bool stalled = false;
>
> while (unlikely(too_many_isolated(pgdat, file, sc))) {
> @@ -1933,7 +1934,7 @@ static int current_may_throttle(void)
> reclaim_stat->recent_scanned[file] += nr_taken;
>
> item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
> - if (!cgroup_reclaim(sc))
> + if (!target_memcg)
> __count_vm_events(item, nr_scanned);
> __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
> spin_unlock_irq(&pgdat->lru_lock);
> @@ -1947,7 +1948,7 @@ static int current_may_throttle(void)
> spin_lock_irq(&pgdat->lru_lock);
>
> item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
> - if (!cgroup_reclaim(sc))
> + if (!target_memcg)
> __count_vm_events(item, nr_reclaimed);
> __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
> reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
> @@ -2041,7 +2042,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
> }
> }
>
> - if (page_referenced(page, 0, sc->target_mem_cgroup,
> + if (page_referenced(page, 0, cgroup_reclaim(sc),
> &vm_flags)) {
> nr_rotated += hpage_nr_pages(page);
> /*
> @@ -2625,7 +2626,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
>
> static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
> {
> - struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
> + struct mem_cgroup *target_memcg = cgroup_reclaim(sc);
> struct mem_cgroup *memcg;
>
> memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
> @@ -2686,10 +2687,11 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
> struct reclaim_state *reclaim_state = current->reclaim_state;
> unsigned long nr_reclaimed, nr_scanned;
> struct lruvec *target_lruvec;
> + struct mem_cgroup *target_memcg = cgroup_reclaim(sc);
> bool reclaimable = false;
> unsigned long file;
>
> - target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
> + target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
>
> again:
> memset(&sc->nr, 0, sizeof(sc->nr));
> @@ -2744,7 +2746,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
> * thrashing file LRU becomes infinitely more attractive than
> * anon pages. Try to detect this based on file LRU size.
> */
> - if (!cgroup_reclaim(sc)) {
> + if (!target_memcg) {
> unsigned long total_high_wmark = 0;
> unsigned long free, anon;
> int z;
> @@ -2782,7 +2784,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
> }
>
> /* Record the subtree's reclaim efficiency */
> - vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
> + vmpressure(sc->gfp_mask, target_memcg, true,
> sc->nr_scanned - nr_scanned,
> sc->nr_reclaimed - nr_reclaimed);
>
> @@ -2833,7 +2835,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
> * stalling in wait_iff_congested().
> */
> if ((current_is_kswapd() ||
> - (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
> + (target_memcg && writeback_throttling_sane(sc))) &&
> sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
> set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);
>
> @@ -3020,14 +3022,15 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
> pg_data_t *last_pgdat;
> struct zoneref *z;
> struct zone *zone;
> + struct mem_cgroup *target_memcg = cgroup_reclaim(sc);
> retry:
> delayacct_freepages_start();
>
> - if (!cgroup_reclaim(sc))
> + if (!target_memcg)
> __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
>
> do {
> - vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
> + vmpressure_prio(sc->gfp_mask, target_memcg,
> sc->priority);
> sc->nr_scanned = 0;
> shrink_zones(zonelist, sc);
> @@ -3053,12 +3056,12 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
> continue;
> last_pgdat = zone->zone_pgdat;
>
> - snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
> + snapshot_refaults(target_memcg, zone->zone_pgdat);
>
> - if (cgroup_reclaim(sc)) {
> + if (target_memcg) {
> struct lruvec *lruvec;
>
> - lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
> + lruvec = mem_cgroup_lruvec(target_memcg,
> zone->zone_pgdat);
> clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
> }
> --
> 1.9.1
>
--
Michal Hocko
SUSE Labs
next prev parent reply other threads:[~2020-03-19 16:07 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-03-19 15:50 qiwuchen55
2020-03-19 16:07 ` Michal Hocko [this message]
2020-03-20 7:34 ` Baoquan He
2020-03-19 16:20 ` Chris Down
2020-03-19 17:36 ` Matthew Wilcox
2020-03-20 2:29 ` chenqiwu
2020-03-20 7:28 ` Michal Hocko
2020-03-20 2:20 ` chenqiwu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200319160707.GG20800@dhcp22.suse.cz \
--to=mhocko@kernel.org \
--cc=akpm@linux-foundation.org \
--cc=chenqiwu@xiaomi.com \
--cc=linux-mm@kvack.org \
--cc=qiwuchen55@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox