From: Michal Hocko <mhocko@kernel.org>
To: Tejun Heo <tj@kernel.org>
Cc: hannes@cmpxchg.org, cgroups@vger.kernel.org, linux-mm@kvack.org,
vdavydov@parallels.com, kernel-team@fb.com
Subject: Re: [PATCH 2/4] memcg: flatten task_struct->memcg_oom
Date: Fri, 28 Aug 2015 19:11:26 +0200 [thread overview]
Message-ID: <20150828171125.GB21463@dhcp22.suse.cz> (raw)
In-Reply-To: <1440775530-18630-3-git-send-email-tj@kernel.org>
On Fri 28-08-15 11:25:28, Tejun Heo wrote:
> task_struct->memcg_oom is a sub-struct containing fields which are
> used for async memcg oom handling. Most task_struct fields aren't
> packaged this way and it can lead to unnecessary alignment paddings.
> This patch flattens it.
>
> * task.memcg_oom.memcg -> task.memcg_in_oom
> * task.memcg_oom.gfp_mask -> task.memcg_oom_gfp_mask
> * task.memcg_oom.order -> task.memcg_oom_order
> * task.memcg_oom.may_oom -> task.memcg_may_oom
>
> In addition, task.memcg_may_oom is relocated to where other bitfields
> are which reduces the size of task_struct.
OK we will save 8B AFAICS which probably doesn't make much difference for
this huge structure. But we already have memcg_kmem_skip_account bit
field there so another one makes sense. That alone would be sufficient
to save those bytes. Regarding the struct, I do not have a strong
opinion. I do not mind removing it.
> Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Michal Hocko <mhocko@suse.com>
> ---
> include/linux/memcontrol.h | 10 +++++-----
> include/linux/sched.h | 13 ++++++-------
> mm/memcontrol.c | 16 ++++++++--------
> 3 files changed, 19 insertions(+), 20 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index ad800e6..3d28656 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -407,19 +407,19 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
>
> static inline void mem_cgroup_oom_enable(void)
> {
> - WARN_ON(current->memcg_oom.may_oom);
> - current->memcg_oom.may_oom = 1;
> + WARN_ON(current->memcg_may_oom);
> + current->memcg_may_oom = 1;
> }
>
> static inline void mem_cgroup_oom_disable(void)
> {
> - WARN_ON(!current->memcg_oom.may_oom);
> - current->memcg_oom.may_oom = 0;
> + WARN_ON(!current->memcg_may_oom);
> + current->memcg_may_oom = 0;
> }
>
> static inline bool task_in_memcg_oom(struct task_struct *p)
> {
> - return p->memcg_oom.memcg;
> + return p->memcg_in_oom;
> }
>
> bool mem_cgroup_oom_synchronize(bool wait);
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index a4ab9da..ef73b54 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1451,7 +1451,9 @@ struct task_struct {
> unsigned sched_reset_on_fork:1;
> unsigned sched_contributes_to_load:1;
> unsigned sched_migrated:1;
> -
> +#ifdef CONFIG_MEMCG
> + unsigned memcg_may_oom:1;
> +#endif
> #ifdef CONFIG_MEMCG_KMEM
> unsigned memcg_kmem_skip_account:1;
> #endif
> @@ -1782,12 +1784,9 @@ struct task_struct {
> unsigned long trace_recursion;
> #endif /* CONFIG_TRACING */
> #ifdef CONFIG_MEMCG
> - struct memcg_oom_info {
> - struct mem_cgroup *memcg;
> - gfp_t gfp_mask;
> - int order;
> - unsigned int may_oom:1;
> - } memcg_oom;
> + struct mem_cgroup *memcg_in_oom;
> + gfp_t memcg_oom_gfp_mask;
> + int memcg_oom_order;
> #endif
> #ifdef CONFIG_UPROBES
> struct uprobe_task *utask;
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 18ecf75..74abb31 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1652,7 +1652,7 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
>
> static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
> {
> - if (!current->memcg_oom.may_oom)
> + if (!current->memcg_may_oom)
> return;
> /*
> * We are in the middle of the charge context here, so we
> @@ -1669,9 +1669,9 @@ static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
> * and when we know whether the fault was overall successful.
> */
> css_get(&memcg->css);
> - current->memcg_oom.memcg = memcg;
> - current->memcg_oom.gfp_mask = mask;
> - current->memcg_oom.order = order;
> + current->memcg_in_oom = memcg;
> + current->memcg_oom_gfp_mask = mask;
> + current->memcg_oom_order = order;
> }
>
> /**
> @@ -1693,7 +1693,7 @@ static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
> */
> bool mem_cgroup_oom_synchronize(bool handle)
> {
> - struct mem_cgroup *memcg = current->memcg_oom.memcg;
> + struct mem_cgroup *memcg = current->memcg_in_oom;
> struct oom_wait_info owait;
> bool locked;
>
> @@ -1721,8 +1721,8 @@ bool mem_cgroup_oom_synchronize(bool handle)
> if (locked && !memcg->oom_kill_disable) {
> mem_cgroup_unmark_under_oom(memcg);
> finish_wait(&memcg_oom_waitq, &owait.wait);
> - mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
> - current->memcg_oom.order);
> + mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
> + current->memcg_oom_order);
> } else {
> schedule();
> mem_cgroup_unmark_under_oom(memcg);
> @@ -1739,7 +1739,7 @@ bool mem_cgroup_oom_synchronize(bool handle)
> memcg_oom_recover(memcg);
> }
> cleanup:
> - current->memcg_oom.memcg = NULL;
> + current->memcg_in_oom = NULL;
> css_put(&memcg->css);
> return true;
> }
> --
> 2.4.3
--
Michal Hocko
SUSE Labs
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
next prev parent reply other threads:[~2015-08-28 17:11 UTC|newest]
Thread overview: 40+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-08-28 15:25 [PATCHSET] memcg: improve high limit behavior and always enable kmemcg on dfl hier Tejun Heo
2015-08-28 15:25 ` [PATCH 1/4] memcg: fix over-high reclaim amount Tejun Heo
2015-08-28 17:06 ` Michal Hocko
2015-08-28 18:32 ` Tejun Heo
2015-08-31 7:51 ` Michal Hocko
2015-08-31 13:38 ` Tejun Heo
2015-09-01 12:51 ` Michal Hocko
2015-09-01 18:33 ` Tejun Heo
2015-08-28 15:25 ` [PATCH 2/4] memcg: flatten task_struct->memcg_oom Tejun Heo
2015-08-28 17:11 ` Michal Hocko [this message]
2015-08-28 15:25 ` [PATCH 3/4] memcg: punt high overage reclaim to return-to-userland path Tejun Heo
2015-08-28 16:36 ` Vladimir Davydov
2015-08-28 16:48 ` Tejun Heo
2015-08-28 20:32 ` Vladimir Davydov
2015-08-28 20:44 ` Tejun Heo
2015-08-28 22:06 ` Tejun Heo
2015-08-29 7:59 ` Vladimir Davydov
2015-08-30 15:52 ` Vladimir Davydov
2015-08-28 17:13 ` Michal Hocko
2015-08-28 17:56 ` Tejun Heo
2015-08-28 20:45 ` Vladimir Davydov
2015-08-28 20:53 ` Tejun Heo
2015-08-28 21:07 ` Vladimir Davydov
2015-08-28 21:14 ` Tejun Heo
2015-08-28 15:25 ` [PATCH 4/4] memcg: always enable kmemcg on the default hierarchy Tejun Heo
2015-08-28 16:49 ` Vladimir Davydov
2015-08-28 16:56 ` Tejun Heo
2015-08-28 17:14 ` Michal Hocko
2015-08-28 17:41 ` Tejun Heo
2015-09-01 12:44 ` Michal Hocko
2015-09-01 18:51 ` Tejun Heo
2015-09-04 13:30 ` Michal Hocko
2015-09-04 15:38 ` Vladimir Davydov
2015-09-07 9:39 ` Michal Hocko
2015-09-07 10:01 ` Vladimir Davydov
2015-09-07 11:03 ` Michal Hocko
2015-09-04 16:18 ` Tejun Heo
2015-09-07 10:54 ` Michal Hocko
2015-09-08 18:50 ` Tejun Heo
2015-11-05 17:30 ` Michal Hocko
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20150828171125.GB21463@dhcp22.suse.cz \
--to=mhocko@kernel.org \
--cc=cgroups@vger.kernel.org \
--cc=hannes@cmpxchg.org \
--cc=kernel-team@fb.com \
--cc=linux-mm@kvack.org \
--cc=tj@kernel.org \
--cc=vdavydov@parallels.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox