From: Ira Weiny <ira.weiny@intel.com>
To: Matthew Wilcox <willy@infradead.org>
Cc: linux-mm@kvack.org
Subject: Re: [PATCH v2 14/15] mm: Pass order to try_to_free_pages in GFP flags
Date: Fri, 10 May 2019 16:26:50 -0700
Message-ID: <20190510232650.GA14369@iweiny-DESK2.sc.intel.com>
In-Reply-To: <20190510135038.17129-15-willy@infradead.org>
On Fri, May 10, 2019 at 06:50:37AM -0700, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
>
> Also remove the order argument from __perform_reclaim() and
> __alloc_pages_direct_reclaim() which only passed the argument down.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  include/linux/swap.h          |  2 +-
>  include/trace/events/vmscan.h | 20 +++++++++-----------
>  mm/page_alloc.c               | 15 ++++++---------
>  mm/vmscan.c                   | 13 ++++++-------
> 4 files changed, 22 insertions(+), 28 deletions(-)
>
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 4bfb5c4ac108..029737fec38b 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -348,7 +348,7 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
>
> /* linux/mm/vmscan.c */
> extern unsigned long zone_reclaimable_pages(struct zone *zone);
> -extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
> +extern unsigned long try_to_free_pages(struct zonelist *zonelist,
> gfp_t gfp_mask, nodemask_t *mask);
> extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
> extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
> diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
> index a5ab2973e8dc..a6b1b20333b4 100644
> --- a/include/trace/events/vmscan.h
> +++ b/include/trace/events/vmscan.h
> @@ -100,45 +100,43 @@ TRACE_EVENT(mm_vmscan_wakeup_kswapd,
>
> DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
>
> - TP_PROTO(int order, gfp_t gfp_flags),
> + TP_PROTO(gfp_t gfp_flags),
>
> - TP_ARGS(order, gfp_flags),
> + TP_ARGS(gfp_flags),
>
> TP_STRUCT__entry(
> - __field( int, order )
> __field( gfp_t, gfp_flags )
> ),
>
> TP_fast_assign(
> - __entry->order = order;
> __entry->gfp_flags = gfp_flags;
> ),
>
> TP_printk("order=%d gfp_flags=%s",
> - __entry->order,
> + gfp_order(__entry->gfp_flags),
> show_gfp_flags(__entry->gfp_flags))
> );
>
> DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_begin,
>
> - TP_PROTO(int order, gfp_t gfp_flags),
> + TP_PROTO(gfp_t gfp_flags),
>
> - TP_ARGS(order, gfp_flags)
> + TP_ARGS(gfp_flags)
> );
>
> #ifdef CONFIG_MEMCG
> DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
>
> - TP_PROTO(int order, gfp_t gfp_flags),
> + TP_PROTO(gfp_t gfp_flags),
>
> - TP_ARGS(order, gfp_flags)
> + TP_ARGS(gfp_flags)
> );
>
> DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin,
>
> - TP_PROTO(int order, gfp_t gfp_flags),
> + TP_PROTO(gfp_t gfp_flags),
>
> - TP_ARGS(order, gfp_flags)
> + TP_ARGS(gfp_flags)
> );
> #endif /* CONFIG_MEMCG */
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index d457dfa8a0ac..29daaf4ae4fb 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4024,9 +4024,7 @@ EXPORT_SYMBOL_GPL(fs_reclaim_release);
> #endif
>
> /* Perform direct synchronous page reclaim */
> -static int
> -__perform_reclaim(gfp_t gfp_mask, unsigned int order,
> - const struct alloc_context *ac)
> +static int __perform_reclaim(gfp_t gfp_mask, const struct alloc_context *ac)
> {
> struct reclaim_state reclaim_state;
> int progress;
> @@ -4043,8 +4041,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
> reclaim_state.reclaimed_slab = 0;
> current->reclaim_state = &reclaim_state;
>
> - progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
> - ac->nodemask);
> + progress = try_to_free_pages(ac->zonelist, gfp_mask, ac->nodemask);
>
> current->reclaim_state = NULL;
> memalloc_noreclaim_restore(noreclaim_flag);
> @@ -4058,14 +4055,14 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
>
> /* The really slow allocator path where we enter direct reclaim */
> static inline struct page *
> -__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
> - unsigned int alloc_flags, const struct alloc_context *ac,
> +__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int alloc_flags,
> + const struct alloc_context *ac,
> unsigned long *did_some_progress)
> {
> struct page *page = NULL;
> bool drained = false;
>
> - *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
> + *did_some_progress = __perform_reclaim(gfp_mask, ac);
> if (unlikely(!(*did_some_progress)))
> return NULL;
>
> @@ -4458,7 +4455,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
> goto nopage;
>
> /* Try direct reclaim and then allocating */
> - page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
> + page = __alloc_pages_direct_reclaim(gfp_mask, alloc_flags, ac,
> &did_some_progress);
> if (page)
> goto got_pg;
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index d9c3e873eca6..e4d4d9c1d7a9 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -3182,15 +3182,15 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
> return false;
> }
>
> -unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
> - gfp_t gfp_mask, nodemask_t *nodemask)
> +unsigned long try_to_free_pages(struct zonelist *zonelist, gfp_t gfp_mask,
> + nodemask_t *nodemask)
> {
> unsigned long nr_reclaimed;
> struct scan_control sc = {
> .nr_to_reclaim = SWAP_CLUSTER_MAX,
> .gfp_mask = current_gfp_context(gfp_mask),
> .reclaim_idx = gfp_zone(gfp_mask),
> - .order = order,
> + .order = gfp_order(gfp_mask),

NIT: Now that the order travels in the gfp flags, could we remove the
order field from struct scan_control entirely and derive it from
sc->gfp_mask where it's needed?  Rough sketch below.
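
Something like this, completely untested, and assuming the gfp_order()
helper introduced earlier in the series is usable from vmscan.c
(sc_order() is just a name made up for illustration):

	/*
	 * Derive the reclaim order from the gfp flags carried in
	 * scan_control instead of keeping a separate order field.
	 */
	static inline int sc_order(const struct scan_control *sc)
	{
		return gfp_order(sc->gfp_mask);
	}

with the remaining sc->order users converted to sc_order(sc).  The one
wrinkle I can see is the kswapd path, where balance_pgdat() sets
.gfp_mask = GFP_KERNEL and .order separately, so that path would need
to encode its order into sc.gfp_mask as well (or keep the field).
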
Ira

> .nodemask = nodemask,
> .priority = DEF_PRIORITY,
> .may_writepage = !laptop_mode,
> @@ -3215,7 +3215,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
> if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
> return 1;
>
> - trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
> + trace_mm_vmscan_direct_reclaim_begin(sc.gfp_mask);
>
> nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
>
> @@ -3244,8 +3244,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
> sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
> (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
>
> - trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
> - sc.gfp_mask);
> + trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.gfp_mask);
>
> /*
> * NOTE: Although we can get the priority field, using it
> @@ -3294,7 +3293,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
>
> zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
>
> - trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
> + trace_mm_vmscan_memcg_reclaim_begin(sc.gfp_mask);
>
> psi_memstall_enter(&pflags);
> noreclaim_flag = memalloc_noreclaim_save();
> --
> 2.20.1
>