* [PATCH] mm: memcg: move cgroup v1 oom handling code into memcontrol-v1.c: fixup
From: Roman Gushchin @ 2024-07-04 0:27 UTC
To: Andrew Morton, Shakeel Butt
Cc: linux-mm, linux-kernel, Johannes Weiner, Michal Hocko,
Muchun Song, Roman Gushchin
This is a small fixup for the commit
"mm: memcg: move cgroup v1 oom handling code into memcontrol-v1.c".
I forgot to actually move the two functions mem_cgroup_node_nr_lru_pages()
and mem_cgroup_nr_lru_pages() into mm/memcontrol-v1.c, so they remained
in mm/memcontrol.c while only their commented-out duplicates ended up in
mm/memcontrol-v1.c.

Andrew, can you please squash it into the original commit?
I checked that the rest of mm-unstable tree can be rebased
automatically without any merge conflicts.
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
---
mm/memcontrol-v1.c | 74 +++++++++++++++++++++++-----------------------
mm/memcontrol-v1.h | 5 ----
mm/memcontrol.c | 38 ------------------------
3 files changed, 37 insertions(+), 80 deletions(-)
diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index 42829cbf7b48..597b03ee9e35 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -2494,43 +2494,43 @@ static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
-/* static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, */
-/* int nid, unsigned int lru_mask, bool tree) */
-/* { */
-/* struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); */
-/* unsigned long nr = 0; */
-/* enum lru_list lru; */
-
-/* VM_BUG_ON((unsigned)nid >= nr_node_ids); */
-
-/* for_each_lru(lru) { */
-/* if (!(BIT(lru) & lru_mask)) */
-/* continue; */
-/* if (tree) */
-/* nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); */
-/* else */
-/* nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); */
-/* } */
-/* return nr; */
-/* } */
-
-/* static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, */
-/* unsigned int lru_mask, */
-/* bool tree) */
-/* { */
-/* unsigned long nr = 0; */
-/* enum lru_list lru; */
-
-/* for_each_lru(lru) { */
-/* if (!(BIT(lru) & lru_mask)) */
-/* continue; */
-/* if (tree) */
-/* nr += memcg_page_state(memcg, NR_LRU_BASE + lru); */
-/* else */
-/* nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); */
-/* } */
-/* return nr; */
-/* } */
+static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+ int nid, unsigned int lru_mask, bool tree)
+{
+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
+ unsigned long nr = 0;
+ enum lru_list lru;
+
+ VM_BUG_ON((unsigned)nid >= nr_node_ids);
+
+ for_each_lru(lru) {
+ if (!(BIT(lru) & lru_mask))
+ continue;
+ if (tree)
+ nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
+ else
+ nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
+ }
+ return nr;
+}
+
+static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
+ unsigned int lru_mask,
+ bool tree)
+{
+ unsigned long nr = 0;
+ enum lru_list lru;
+
+ for_each_lru(lru) {
+ if (!(BIT(lru) & lru_mask))
+ continue;
+ if (tree)
+ nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
+ else
+ nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
+ }
+ return nr;
+}
static int memcg_numa_stat_show(struct seq_file *m, void *v)
{
diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h
index 7be4670d9abb..0a4d5092c51b 100644
--- a/mm/memcontrol-v1.h
+++ b/mm/memcontrol-v1.h
@@ -88,11 +88,6 @@ void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
void memcg1_oom_recover(struct mem_cgroup *memcg);
void drain_all_stock(struct mem_cgroup *root_memcg);
-unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
- unsigned int lru_mask, bool tree);
-unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
- int nid, unsigned int lru_mask,
- bool tree);
unsigned long memcg_events(struct mem_cgroup *memcg, int event);
unsigned long memcg_events_local(struct mem_cgroup *memcg, int event);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 11e4a3c65437..f35ed6655992 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3188,44 +3188,6 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
}
#endif /* CONFIG_MEMCG_KMEM */
-unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
- int nid, unsigned int lru_mask,
- bool tree)
-{
- struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
- unsigned long nr = 0;
- enum lru_list lru;
-
- VM_BUG_ON((unsigned)nid >= nr_node_ids);
-
- for_each_lru(lru) {
- if (!(BIT(lru) & lru_mask))
- continue;
- if (tree)
- nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
- else
- nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
- }
- return nr;
-}
-
-unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
- unsigned int lru_mask, bool tree)
-{
- unsigned long nr = 0;
- enum lru_list lru;
-
- for_each_lru(lru) {
- if (!(BIT(lru) & lru_mask))
- continue;
- if (tree)
- nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
- else
- nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
- }
- return nr;
-}
-
#ifdef CONFIG_CGROUP_WRITEBACK
#include <trace/events/writeback.h>
--
2.45.2.803.g4e1b14247a-goog
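For context, the two relocated helpers sum per-LRU page counts selected by a
caller-supplied bitmask, either hierarchically across the memcg subtree
(tree == true) or for the local cgroup only. A simplified sketch of a caller,
loosely modeled on the cgroup v1 memory.numa_stat handler, is shown below; the
stat table and the function name numa_stat_show_sketch() are illustrative only
and are not part of the patch:

	static const struct numa_stat {
		const char *name;
		unsigned int lru_mask;
	} stats[] = {
		{ "total",	 LRU_ALL },
		{ "file",	 LRU_ALL_FILE },
		{ "anon",	 LRU_ALL_ANON },
		{ "unevictable", BIT(LRU_UNEVICTABLE) },
	};

	/* Illustrative only: print "<name>=<pages> N0=<pages> N1=..." per mask. */
	static int numa_stat_show_sketch(struct seq_file *m, struct mem_cgroup *memcg)
	{
		const struct numa_stat *stat;
		int nid;

		for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
			/* hierarchical total for this LRU mask */
			seq_printf(m, "%s=%lu", stat->name,
				   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, true));
			/* per-node breakdown via the node-aware helper */
			for_each_node_state(nid, N_MEMORY)
				seq_printf(m, " N%d=%lu", nid,
					   mem_cgroup_node_nr_lru_pages(memcg, nid,
									stat->lru_mask, true));
			seq_putc(m, '\n');
		}
		return 0;
	}

Because both helpers become static to mm/memcontrol-v1.c, any such caller has
to live in the same file, which is why the fixup also drops their declarations
from mm/memcontrol-v1.h.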
* Re: [PATCH] mm: memcg: move cgroup v1 oom handling code into memcontrol-v1.c: fixup
From: Andrew Morton @ 2024-07-04 1:52 UTC
To: Roman Gushchin
Cc: Shakeel Butt, linux-mm, linux-kernel, Johannes Weiner,
Michal Hocko, Muchun Song
On Thu, 4 Jul 2024 00:27:12 +0000 Roman Gushchin <roman.gushchin@linux.dev> wrote:
> This is a small fixup for the commit
> "mm: memcg: move cgroup v1 oom handling code into memcontrol-v1.c".
>
> I forgot to actually move the two functions mem_cgroup_node_nr_lru_pages()
> and mem_cgroup_nr_lru_pages() into mm/memcontrol-v1.c, so they remained
> in mm/memcontrol.c while only their commented-out duplicates ended up in
> mm/memcontrol-v1.c.
>
> Andrew, can you please squash it into the original commit?
>
Seems this wants to live behind "mm: memcg: move cgroup v1 interface
files to memcontrol-v1.c" so that's where I placed it.
* Re: [PATCH] mm: memcg: move cgroup v1 oom handling code into memcontrol-v1.c: fixup
From: Roman Gushchin @ 2024-07-04 2:06 UTC
To: Andrew Morton
Cc: Shakeel Butt, linux-mm, linux-kernel, Johannes Weiner,
Michal Hocko, Muchun Song
On Wed, Jul 03, 2024 at 06:52:50PM -0700, Andrew Morton wrote:
> On Thu, 4 Jul 2024 00:27:12 +0000 Roman Gushchin <roman.gushchin@linux.dev> wrote:
>
> > This is a small fixup for the commit
> > "mm: memcg: move cgroup v1 oom handling code into memcontrol-v1.c".
> >
> > I forgot to actually move the two functions mem_cgroup_node_nr_lru_pages()
> > and mem_cgroup_nr_lru_pages() into mm/memcontrol-v1.c, so they remained
> > in mm/memcontrol.c while only their commented-out duplicates ended up in
> > mm/memcontrol-v1.c.
> >
> > Andrew, can you please squash it into the original commit?
> >
>
> Seems this wants to live behind "mm: memcg: move cgroup v1 interface
> files to memcontrol-v1.c" so that's where I placed it.
Right, this is the proper place for it.
Thank you!