From: Michal Hocko <mhocko@suse.com>
To: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Shakeel Butt <shakeel.butt@linux.dev>,
	Muchun Song <muchun.song@linux.dev>,
	linux-kernel@vger.kernel.org, cgroups@vger.kernel.org,
	linux-mm@kvack.org
Subject: Re: [PATCH v2 12/14] mm: memcg: group cgroup v1 memcg related declarations
Date: Tue, 25 Jun 2024 09:09:45 +0200
Message-ID: <ZnptOT1R3PC6vYYU@tiehlicka>
In-Reply-To: <20240625005906.106920-13-roman.gushchin@linux.dev>

On Mon 24-06-24 17:59:04, Roman Gushchin wrote:
> Group all cgroup v1-related declarations at the end of memcontrol.h
> and mm/memcontrol-v1.h with the intention of putting them all under
> a config option later on. It should also make the code easier to
> follow and maintain.
> 
> Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>

Acked-by: Michal Hocko <mhocko@suse.com>
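
For reviewers following along: the point of grouping the declarations
is that the whole v1 block can later be hidden behind a single config
option. The end state would presumably look something like the sketch
below (the option name CONFIG_MEMCG_V1 and the stub shown are
illustrative only, not part of this patch):

	/* Cgroup v1-related declarations */
	#ifdef CONFIG_MEMCG_V1
	unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
	/* ... rest of the v1-only API ... */
	#else /* CONFIG_MEMCG_V1 */
	static inline unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat,
			int order, gfp_t gfp_mask, unsigned long *total_scanned)
	{
		return 0;
	}
	/* ... no-op stubs for the rest of the v1-only API ... */
	#endif /* CONFIG_MEMCG_V1 */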

> ---
>  include/linux/memcontrol.h | 144 +++++++++++++++++++------------------
>  mm/memcontrol-v1.h         |  89 ++++++++++++-----------
>  2 files changed, 123 insertions(+), 110 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 588179d29849..a70d64ed04f5 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -950,39 +950,13 @@ static inline void mem_cgroup_exit_user_fault(void)
>  	current->in_user_fault = 0;
>  }
>  
> -static inline bool task_in_memcg_oom(struct task_struct *p)
> -{
> -	return p->memcg_in_oom;
> -}
> -
> -bool mem_cgroup_oom_synchronize(bool wait);
>  struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
>  					    struct mem_cgroup *oom_domain);
>  void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
>  
> -void folio_memcg_lock(struct folio *folio);
> -void folio_memcg_unlock(struct folio *folio);
> -
>  void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
>  		       int val);
>  
> -/* try to stabilize folio_memcg() for all the pages in a memcg */
> -static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
> -{
> -	rcu_read_lock();
> -
> -	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
> -		return true;
> -
> -	rcu_read_unlock();
> -	return false;
> -}
> -
> -static inline void mem_cgroup_unlock_pages(void)
> -{
> -	rcu_read_unlock();
> -}
> -
>  /* idx can be of type enum memcg_stat_item or node_stat_item */
>  static inline void mod_memcg_state(struct mem_cgroup *memcg,
>  				   enum memcg_stat_item idx, int val)
> @@ -1109,10 +1083,6 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
>  
>  void split_page_memcg(struct page *head, int old_order, int new_order);
>  
> -unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
> -					gfp_t gfp_mask,
> -					unsigned long *total_scanned);
> -
>  #else /* CONFIG_MEMCG */
>  
>  #define MEM_CGROUP_ID_SHIFT	0
> @@ -1423,26 +1393,6 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
>  {
>  }
>  
> -static inline void folio_memcg_lock(struct folio *folio)
> -{
> -}
> -
> -static inline void folio_memcg_unlock(struct folio *folio)
> -{
> -}
> -
> -static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
> -{
> -	/* to match folio_memcg_rcu() */
> -	rcu_read_lock();
> -	return true;
> -}
> -
> -static inline void mem_cgroup_unlock_pages(void)
> -{
> -	rcu_read_unlock();
> -}
> -
>  static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
>  {
>  }
> @@ -1455,16 +1405,6 @@ static inline void mem_cgroup_exit_user_fault(void)
>  {
>  }
>  
> -static inline bool task_in_memcg_oom(struct task_struct *p)
> -{
> -	return false;
> -}
> -
> -static inline bool mem_cgroup_oom_synchronize(bool wait)
> -{
> -	return false;
> -}
> -
>  static inline struct mem_cgroup *mem_cgroup_get_oom_group(
>  	struct task_struct *victim, struct mem_cgroup *oom_domain)
>  {
> @@ -1558,14 +1498,6 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
>  static inline void split_page_memcg(struct page *head, int old_order, int new_order)
>  {
>  }
> -
> -static inline
> -unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
> -					gfp_t gfp_mask,
> -					unsigned long *total_scanned)
> -{
> -	return 0;
> -}
>  #endif /* CONFIG_MEMCG */
>  
>  /*
> @@ -1916,4 +1848,80 @@ static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
>  }
>  #endif
>  
> +
> +/* Cgroup v1-related declarations */
> +
> +#ifdef CONFIG_MEMCG
> +unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
> +					gfp_t gfp_mask,
> +					unsigned long *total_scanned);
> +
> +bool mem_cgroup_oom_synchronize(bool wait);
> +
> +static inline bool task_in_memcg_oom(struct task_struct *p)
> +{
> +	return p->memcg_in_oom;
> +}
> +
> +void folio_memcg_lock(struct folio *folio);
> +void folio_memcg_unlock(struct folio *folio);
> +
> +/* try to stabilize folio_memcg() for all the pages in a memcg */
> +static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
> +{
> +	rcu_read_lock();
> +
> +	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
> +		return true;
> +
> +	rcu_read_unlock();
> +	return false;
> +}
> +
> +static inline void mem_cgroup_unlock_pages(void)
> +{
> +	rcu_read_unlock();
> +}
> +
> +#else /* CONFIG_MEMCG */
> +static inline
> +unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
> +					gfp_t gfp_mask,
> +					unsigned long *total_scanned)
> +{
> +	return 0;
> +}
> +
> +static inline void folio_memcg_lock(struct folio *folio)
> +{
> +}
> +
> +static inline void folio_memcg_unlock(struct folio *folio)
> +{
> +}
> +
> +static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
> +{
> +	/* to match folio_memcg_rcu() */
> +	rcu_read_lock();
> +	return true;
> +}
> +
> +static inline void mem_cgroup_unlock_pages(void)
> +{
> +	rcu_read_unlock();
> +}
> +
> +static inline bool task_in_memcg_oom(struct task_struct *p)
> +{
> +	return false;
> +}
> +
> +static inline bool mem_cgroup_oom_synchronize(bool wait)
> +{
> +	return false;
> +}
> +
> +#endif /* CONFIG_MEMCG */
> +
>  #endif /* _LINUX_MEMCONTROL_H */
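
The memcontrol.h part looks good. For readers not familiar with the
locking pair being moved here: mem_cgroup_trylock_pages() and
mem_cgroup_unlock_pages() stabilize folio_memcg() against v1 charge
moving. A minimal caller sketch, matching the semantics of the code
above (walk_one_folio() and do_something() are made up purely for
illustration):

	static void walk_one_folio(struct mem_cgroup *memcg, struct folio *folio)
	{
		if (mem_cgroup_trylock_pages(memcg)) {
			/* RCU is held; folio_memcg() is stable here */
			do_something(folio_memcg(folio));
			/* drops the rcu_read_lock() taken by the trylock */
			mem_cgroup_unlock_pages();
		} else {
			/* a charge move is in flight; retry or bail out */
		}
	}
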
> diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h
> index 7d6ac4a4fb36..89d420793048 100644
> --- a/mm/memcontrol-v1.h
> +++ b/mm/memcontrol-v1.h
> @@ -5,15 +5,9 @@
>  
>  #include <linux/cgroup-defs.h>
>  
> -void memcg1_remove_from_trees(struct mem_cgroup *memcg);
> -
> -static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
> -{
> -	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
> -}
> +/* Cgroup v1 and v2 common declarations */
>  
>  void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
> -void memcg1_check_events(struct mem_cgroup *memcg, int nid);
>  int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
>  		     unsigned int nr_pages);
>  
> @@ -29,30 +23,6 @@ static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
>  void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n);
>  void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n);
>  
> -bool memcg1_wait_acct_move(struct mem_cgroup *memcg);
> -struct cgroup_taskset;
> -int memcg1_can_attach(struct cgroup_taskset *tset);
> -void memcg1_cancel_attach(struct cgroup_taskset *tset);
> -void memcg1_move_task(void);
> -
> -/*
> - * Per memcg event counter is incremented at every pagein/pageout. With THP,
> - * it will be incremented by the number of pages. This counter is used
> - * to trigger some periodic events. This is straightforward and better
> - * than using jiffies etc. to handle periodic memcg event.
> - */
> -enum mem_cgroup_events_target {
> -	MEM_CGROUP_TARGET_THRESH,
> -	MEM_CGROUP_TARGET_SOFTLIMIT,
> -	MEM_CGROUP_NTARGETS,
> -};
> -
> -/* Whether legacy memory+swap accounting is active */
> -static bool do_memsw_account(void)
> -{
> -	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
> -}
> -
>  /*
>   * Iteration constructs for visiting all cgroups (under a tree).  If
>   * loops are exited prematurely (break), mem_cgroup_iter_break() must
> @@ -68,24 +38,28 @@ static bool do_memsw_account(void)
>  	     iter != NULL;				\
>  	     iter = mem_cgroup_iter(NULL, iter, NULL))
>  
> -void memcg1_css_offline(struct mem_cgroup *memcg);
> +/* Whether legacy memory+swap accounting is active */
> +static bool do_memsw_account(void)
> +{
> +	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
> +}
>  
> -/* for encoding cft->private value on file */
> -enum res_type {
> -	_MEM,
> -	_MEMSWAP,
> -	_KMEM,
> -	_TCP,
> +/*
> + * Per memcg event counter is incremented at every pagein/pageout. With THP,
> + * it will be incremented by the number of pages. This counter is used
> + * to trigger some periodic events. This is straightforward and better
> + * than using jiffies etc. to handle periodic memcg event.
> + */
> +enum mem_cgroup_events_target {
> +	MEM_CGROUP_TARGET_THRESH,
> +	MEM_CGROUP_TARGET_SOFTLIMIT,
> +	MEM_CGROUP_NTARGETS,
>  };
>  
>  bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
>  				enum mem_cgroup_events_target target);
>  unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);
>  
> -bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
> -void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
> -void memcg1_oom_recover(struct mem_cgroup *memcg);
> -
>  void drain_all_stock(struct mem_cgroup *root_memcg);
>  unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
>  				      unsigned int lru_mask, bool tree);
> @@ -100,6 +74,37 @@ unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
>  unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
>  int memory_stat_show(struct seq_file *m, void *v);
>  
> +/* Cgroup v1-specific declarations */
> +
> +void memcg1_remove_from_trees(struct mem_cgroup *memcg);
> +
> +static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
> +{
> +	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
> +}
> +
> +bool memcg1_wait_acct_move(struct mem_cgroup *memcg);
> +
> +struct cgroup_taskset;
> +int memcg1_can_attach(struct cgroup_taskset *tset);
> +void memcg1_cancel_attach(struct cgroup_taskset *tset);
> +void memcg1_move_task(void);
> +void memcg1_css_offline(struct mem_cgroup *memcg);
> +
> +/* for encoding cft->private value on file */
> +enum res_type {
> +	_MEM,
> +	_MEMSWAP,
> +	_KMEM,
> +	_TCP,
> +};
> +
> +bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
> +void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
> +void memcg1_oom_recover(struct mem_cgroup *memcg);
> +
> +void memcg1_check_events(struct mem_cgroup *memcg, int nid);
> +
>  void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
>  
>  extern struct cftype memsw_files[];
> -- 
> 2.45.2
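
One more note on the memcontrol-v1.h split: do_memsw_account() stays
in the common section, which is correct, because common charge and
uncharge paths in memcontrol.c still have to check it before touching
the memsw counter. Roughly (a simplified sketch of the existing
pattern, not new code):

	/* memory+swap accounting only exists on the v1 hierarchy */
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);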

-- 
Michal Hocko
SUSE Labs


