linux-mm.kvack.org archive mirror
From: Michal Hocko <mhocko@suse.com>
To: Nhat Pham <nphamcs@gmail.com>
Cc: akpm@linux-foundation.org, riel@surriel.com, hannes@cmpxchg.org,
	roman.gushchin@linux.dev, shakeelb@google.com,
	muchun.song@linux.dev, tj@kernel.org, lizefan.x@bytedance.com,
	shuah@kernel.org, mike.kravetz@oracle.com, yosryahmed@google.com,
	fvdl@google.com, linux-mm@kvack.org, kernel-team@meta.com,
	linux-kernel@vger.kernel.org, cgroups@vger.kernel.org
Subject: Re: [PATCH v3 1/3] memcontrol: add helpers for hugetlb memcg accounting
Date: Tue, 3 Oct 2023 13:50:01 +0200	[thread overview]
Message-ID: <ZRv/6aKEDJGGIeyq@dhcp22.suse.cz> (raw)
In-Reply-To: <20231003001828.2554080-2-nphamcs@gmail.com>

On Mon 02-10-23 17:18:26, Nhat Pham wrote:
> This patch exposes charge committing and cancelling as part of the
> memory controller interface. These helpers are useful when the
> try_charge() and commit_charge() stages have to be separated by other
> actions in between (which can fail). One such example is the new
> hugetlb accounting behavior in the following patch.
> 
> The patch also adds a helper function to obtain a reference to the
> current task's memcg.
> 
> Signed-off-by: Nhat Pham <nphamcs@gmail.com>

OK, makes sense.
Acked-by: Michal Hocko <mhocko@suse.com>

Usually we prefer to have newly introduced functions along with their
users, but I do understand that this split just makes it easier to
review the main patch for the feature.
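
For context, the intended usage pattern for the new helpers is roughly
the following sketch. This is illustrative only: my_try_charge() and
my_alloc_folio() are made-up names, since this patch only exports the
commit and cancel steps, and NULL-memcg handling (memcg disabled) is
elided. The real hugetlb flow lives in the next patch:

	struct mem_cgroup *memcg = get_mem_cgroup_from_current();
	struct folio *folio;

	/* hypothetical try step: charge the memcg, commit nothing yet */
	if (my_try_charge(memcg, gfp, nr_pages))
		goto out_put;

	folio = my_alloc_folio();	/* made-up allocation that can fail */
	if (!folio) {
		/* roll back the uncommitted charge */
		mem_cgroup_cancel_charge(memcg, nr_pages);
		goto out_put;
	}

	/* bind the charge to the folio; updates memcg stats and events */
	mem_cgroup_commit_charge(folio, memcg);
out_put:
	mem_cgroup_put(memcg);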

> ---
>  include/linux/memcontrol.h | 21 ++++++++++++++
>  mm/memcontrol.c            | 59 ++++++++++++++++++++++++++++++--------
>  2 files changed, 68 insertions(+), 12 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index e0cfab58ab71..42bf7e9b1a2f 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -653,6 +653,8 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
>  		page_counter_read(&memcg->memory);
>  }
>  
> +void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
> +
>  int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
>  
>  /**
> @@ -704,6 +706,8 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
>  	__mem_cgroup_uncharge_list(page_list);
>  }
>  
> +void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
> +
>  void mem_cgroup_migrate(struct folio *old, struct folio *new);
>  
>  /**
> @@ -760,6 +764,8 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
>  
>  struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
>  
> +struct mem_cgroup *get_mem_cgroup_from_current(void);
> +
>  struct lruvec *folio_lruvec_lock(struct folio *folio);
>  struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
>  struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
> @@ -1245,6 +1251,11 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
>  	return false;
>  }
>  
> +static inline void mem_cgroup_commit_charge(struct folio *folio,
> +		struct mem_cgroup *memcg)
> +{
> +}
> +
>  static inline int mem_cgroup_charge(struct folio *folio,
>  		struct mm_struct *mm, gfp_t gfp)
>  {
> @@ -1269,6 +1280,11 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
>  {
>  }
>  
> +static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
> +		unsigned int nr_pages)
> +{
> +}
> +
>  static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
>  {
>  }
> @@ -1306,6 +1322,11 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
>  	return NULL;
>  }
>  
> +static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
> +{
> +	return NULL;
> +}
> +
>  static inline
>  struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
>  {
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index d1a322a75172..0219befeae38 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1086,6 +1086,27 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
>  }
>  EXPORT_SYMBOL(get_mem_cgroup_from_mm);
>  
> +/**
> + * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
> + */
> +struct mem_cgroup *get_mem_cgroup_from_current(void)
> +{
> +	struct mem_cgroup *memcg;
> +
> +	if (mem_cgroup_disabled())
> +		return NULL;
> +
> +again:
> +	rcu_read_lock();
> +	memcg = mem_cgroup_from_task(current);
> +	if (!css_tryget(&memcg->css)) {
> +		rcu_read_unlock();
> +		goto again;
> +	}
> +	rcu_read_unlock();
> +	return memcg;
> +}
> +
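
The retry loop above mirrors get_mem_cgroup_from_mm(): a failed
css_tryget() means the memcg is going away under us, so we re-read
current's memcg under RCU and try again. Callers also have to cope
with the NULL return when memcg is disabled; a caller would look
roughly like this (a sketch, not code from the patch):

	struct mem_cgroup *memcg = get_mem_cgroup_from_current();

	/* memcg is NULL when memcg is disabled */
	...
	mem_cgroup_put(memcg);	/* mem_cgroup_put() handles NULL */
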
>  static __always_inline bool memcg_kmem_bypass(void)
>  {
>  	/* Allow remote memcg charging from any context. */
> @@ -2873,7 +2894,12 @@ static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
>  	return try_charge_memcg(memcg, gfp_mask, nr_pages);
>  }
>  
> -static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
> +/**
> + * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
> + * @memcg: memcg previously charged.
> + * @nr_pages: number of pages previously charged.
> + */
> +void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
>  {
>  	if (mem_cgroup_is_root(memcg))
>  		return;
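
(The root check here matches the short-circuit in try_charge(): the
root memcg is never charged against the page counters, so there is
nothing to roll back for it.)
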
> @@ -2898,6 +2924,22 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
>  	folio->memcg_data = (unsigned long)memcg;
>  }
>  
> +/**
> + * mem_cgroup_commit_charge - commit a previously successful try_charge().
> + * @folio: folio to commit the charge to.
> + * @memcg: memcg previously charged.
> + */
> +void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
> +{
> +	css_get(&memcg->css);
> +	commit_charge(folio, memcg);
> +
> +	local_irq_disable();
> +	mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
> +	memcg_check_events(memcg, folio_nid(folio));
> +	local_irq_enable();
> +}
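
Two things worth spelling out about the commit side: it takes its own
css reference (the css_get() above), so a caller that got the memcg
from get_mem_cgroup_from_current() still owes a mem_cgroup_put()
afterwards, and the irq-disabled section is what keeps the per-cpu
statistics and event updates consistent, same as the previously
inlined code in charge_memcg() below.
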
> +
>  #ifdef CONFIG_MEMCG_KMEM
>  /*
>   * The allocated objcg pointers array is not accounted directly.
> @@ -6105,7 +6147,7 @@ static void __mem_cgroup_clear_mc(void)
>  
>  	/* we must uncharge all the leftover precharges from mc.to */
>  	if (mc.precharge) {
> -		cancel_charge(mc.to, mc.precharge);
> +		mem_cgroup_cancel_charge(mc.to, mc.precharge);
>  		mc.precharge = 0;
>  	}
>  	/*
> @@ -6113,7 +6155,7 @@ static void __mem_cgroup_clear_mc(void)
>  	 * we must uncharge here.
>  	 */
>  	if (mc.moved_charge) {
> -		cancel_charge(mc.from, mc.moved_charge);
> +		mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
>  		mc.moved_charge = 0;
>  	}
>  	/* we must fixup refcnts and charges */
> @@ -7020,20 +7062,13 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root,
>  static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
>  			gfp_t gfp)
>  {
> -	long nr_pages = folio_nr_pages(folio);
>  	int ret;
>  
> -	ret = try_charge(memcg, gfp, nr_pages);
> +	ret = try_charge(memcg, gfp, folio_nr_pages(folio));
>  	if (ret)
>  		goto out;
>  
> -	css_get(&memcg->css);
> -	commit_charge(folio, memcg);
> -
> -	local_irq_disable();
> -	mem_cgroup_charge_statistics(memcg, nr_pages);
> -	memcg_check_events(memcg, folio_nid(folio));
> -	local_irq_enable();
> +	mem_cgroup_commit_charge(folio, memcg);
>  out:
>  	return ret;
>  }
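
And the charge_memcg() rework is a straight refactor: the deleted
lines are essentially the body of the new mem_cgroup_commit_charge(),
so the common charge path is functionally unchanged.
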
> -- 
> 2.34.1

-- 
Michal Hocko
SUSE Labs


Thread overview: 26+ messages
2023-10-03  0:18 [PATCH v3 0/3] " Nhat Pham
2023-10-03  0:18 ` [PATCH v3 1/3] memcontrol: add helpers for " Nhat Pham
2023-10-03 11:50   ` Michal Hocko [this message]
2023-10-03 12:47   ` Johannes Weiner
2023-10-03  0:18 ` [PATCH v3 2/3] hugetlb: memcg: account hugetlb-backed memory in memory controller Nhat Pham
2023-10-03  0:26   ` Nhat Pham
2023-10-03 12:54   ` Johannes Weiner
2023-10-03 12:58   ` Michal Hocko
2023-10-03 15:59     ` Johannes Weiner
2023-10-03 17:13   ` Mike Kravetz
2023-10-03 18:01     ` Nhat Pham
2023-10-03 18:39       ` Johannes Weiner
2023-10-03 22:09         ` Nhat Pham
2023-10-03 22:42           ` Mike Kravetz
2023-10-03 23:26             ` Nhat Pham
2023-10-03 23:14     ` [PATCH] memcontrol: only transfer the memcg data for migration Nhat Pham
2023-10-03 23:22       ` Yosry Ahmed
2023-10-03 23:31         ` Nhat Pham
2023-10-03 23:54           ` Yosry Ahmed
2023-10-04  0:02             ` Nhat Pham
2023-10-04  0:02       ` Nhat Pham
2023-10-04 14:17       ` Johannes Weiner
2023-10-04 19:45   ` [PATCH v3 2/3] hugetlb: memcg: account hugetlb-backed memory in memory controller (fix) Nhat Pham
2023-10-06 17:25     ` Andrew Morton
2023-10-06 18:23       ` Nhat Pham
2023-10-03  0:18 ` [PATCH v3 3/3] selftests: add a selftest to verify hugetlb usage in memcg Nhat Pham
