From: Muchun Song <songmuchun@bytedance.com>
To: hannes@cmpxchg.org, mhocko@kernel.org, roman.gushchin@linux.dev,
shakeelb@google.com
Cc: cgroups@vger.kernel.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, duanxiongchun@bytedance.com,
longman@redhat.com, Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v4 05/11] mm: thp: introduce folio_split_queue_lock{_irqsave}()
Date: Tue, 24 May 2022 14:05:45 +0800
Message-ID: <20220524060551.80037-6-songmuchun@bytedance.com>
In-Reply-To: <20220524060551.80037-1-songmuchun@bytedance.com>
We should make the THP deferred split queue lock safe when LRU pages are
reparented. Similar to folio_lruvec_lock{_irqsave,_irq}(), introduce
folio_split_queue_lock{_irqsave}(), which looks up a folio's deferred
split queue and takes its lock in one place, making the lock easier to
reparent.

In the next patch, we will use the same approach as the lruvec lock to
make the THP deferred split queue lock safe when LRU pages are
reparented.
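
For illustration, here is a minimal sketch of the calling convention the
new helpers establish (the folio and the work done under the lock are
placeholders, not code from this patch):

	struct deferred_split *queue;
	unsigned long flags;

	/* Resolve the folio's split queue and take its lock in one step. */
	queue = folio_split_queue_lock_irqsave(folio, &flags);
	/* ... manipulate queue->split_queue under the lock ... */
	split_queue_unlock_irqrestore(queue, flags);

The caller no longer resolves the queue separately before locking it;
the helper returns the locked queue, which is what makes reparenting
the lock feasible later.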
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 include/linux/memcontrol.h |  10 +++++
 mm/huge_memory.c           | 100 +++++++++++++++++++++++++++++++++------------
 2 files changed, 84 insertions(+), 26 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 4042e4d21fe2..8c2f1ba2f471 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1650,6 +1650,11 @@ int alloc_shrinker_info(struct mem_cgroup *memcg);
 void free_shrinker_info(struct mem_cgroup *memcg);
 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
 void reparent_shrinker_deferred(struct mem_cgroup *memcg);
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+	return shrinker->id;
+}
 #else
 #define mem_cgroup_sockets_enabled 0
 static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
@@ -1663,6 +1668,11 @@ static inline void set_shrinker_bit(struct mem_cgroup *memcg,
 				    int nid, int shrinker_id)
 {
 }
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+	return -1;
+}
 #endif
 
 #ifdef CONFIG_MEMCG_KMEM
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 910a138e9859..ea152bde441e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -503,25 +503,74 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_MEMCG
-static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+static inline struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
+		struct deferred_split *queue)
 {
-	struct mem_cgroup *memcg = page_memcg(compound_head(page));
-	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+	if (mem_cgroup_disabled())
+		return NULL;
+	if (&NODE_DATA(folio_nid(folio))->deferred_split_queue == queue)
+		return NULL;
+	return container_of(queue, struct mem_cgroup, deferred_split_queue);
+}
 
-	if (memcg)
-		return &memcg->deferred_split_queue;
-	else
-		return &pgdat->deferred_split_queue;
+static inline struct deferred_split *folio_memcg_split_queue(struct folio *folio)
+{
+	struct mem_cgroup *memcg = folio_memcg(folio);
+
+	return memcg ? &memcg->deferred_split_queue : NULL;
 }
 #else
-static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+static inline struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
+		struct deferred_split *queue)
 {
-	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+	return NULL;
+}
 
-	return &pgdat->deferred_split_queue;
+static inline struct deferred_split *folio_memcg_split_queue(struct folio *folio)
+{
+	return NULL;
 }
 #endif
 
+static struct deferred_split *folio_split_queue(struct folio *folio)
+{
+	struct deferred_split *queue = folio_memcg_split_queue(folio);
+
+	return queue ? : &NODE_DATA(folio_nid(folio))->deferred_split_queue;
+}
+
+static struct deferred_split *folio_split_queue_lock(struct folio *folio)
+{
+	struct deferred_split *queue;
+
+	queue = folio_split_queue(folio);
+	spin_lock(&queue->split_queue_lock);
+
+	return queue;
+}
+
+static struct deferred_split *
+folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
+{
+	struct deferred_split *queue;
+
+	queue = folio_split_queue(folio);
+	spin_lock_irqsave(&queue->split_queue_lock, *flags);
+
+	return queue;
+}
+
+static inline void split_queue_unlock(struct deferred_split *queue)
+{
+	spin_unlock(&queue->split_queue_lock);
+}
+
+static inline void split_queue_unlock_irqrestore(struct deferred_split *queue,
+						 unsigned long flags)
+{
+	spin_unlock_irqrestore(&queue->split_queue_lock, flags);
+}
+
 void prep_transhuge_page(struct page *page)
 {
 	/*
@@ -2489,7 +2538,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
 	struct folio *folio = page_folio(page);
 	struct page *head = &folio->page;
-	struct deferred_split *ds_queue = get_deferred_split_queue(head);
+	struct deferred_split *ds_queue;
 	XA_STATE(xas, &head->mapping->i_pages, head->index);
 	struct anon_vma *anon_vma = NULL;
 	struct address_space *mapping = NULL;
@@ -2581,13 +2630,13 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	}
 
 	/* Prevent deferred_split_scan() touching ->_refcount */
-	spin_lock(&ds_queue->split_queue_lock);
+	ds_queue = folio_split_queue_lock(folio);
 	if (page_ref_freeze(head, 1 + extra_pins)) {
 		if (!list_empty(page_deferred_list(head))) {
 			ds_queue->split_queue_len--;
 			list_del(page_deferred_list(head));
 		}
-		spin_unlock(&ds_queue->split_queue_lock);
+		split_queue_unlock(ds_queue);
 		if (mapping) {
 			int nr = thp_nr_pages(head);
 
@@ -2605,7 +2654,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		__split_huge_page(page, list, end);
 		ret = 0;
 	} else {
-		spin_unlock(&ds_queue->split_queue_lock);
+		split_queue_unlock(ds_queue);
 fail:
 		if (mapping)
 			xas_unlock(&xas);
@@ -2630,25 +2679,23 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
 void free_transhuge_page(struct page *page)
 {
-	struct deferred_split *ds_queue = get_deferred_split_queue(page);
+	struct deferred_split *ds_queue;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	ds_queue = folio_split_queue_lock_irqsave(page_folio(page), &flags);
 	if (!list_empty(page_deferred_list(page))) {
 		ds_queue->split_queue_len--;
 		list_del(page_deferred_list(page));
 	}
-	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+	split_queue_unlock_irqrestore(ds_queue, flags);
 	free_compound_page(page);
 }
 
 void deferred_split_huge_page(struct page *page)
 {
-	struct deferred_split *ds_queue = get_deferred_split_queue(page);
-#ifdef CONFIG_MEMCG
-	struct mem_cgroup *memcg = page_memcg(compound_head(page));
-#endif
+	struct deferred_split *ds_queue;
 	unsigned long flags;
+	struct folio *folio = page_folio(page);
 
 	VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 
@@ -2665,18 +2712,19 @@ void deferred_split_huge_page(struct page *page)
 	if (PageSwapCache(page))
 		return;
 
-	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
 	if (list_empty(page_deferred_list(page))) {
+		struct mem_cgroup *memcg;
+
+		memcg = folio_split_queue_memcg(folio, ds_queue);
 		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
 		list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
 		ds_queue->split_queue_len++;
-#ifdef CONFIG_MEMCG
 		if (memcg)
 			set_shrinker_bit(memcg, page_to_nid(page),
-					 deferred_split_shrinker.id);
-#endif
+					 shrinker_id(&deferred_split_shrinker));
 	}
-	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+	split_queue_unlock_irqrestore(ds_queue, flags);
 }
 
 static unsigned long deferred_split_count(struct shrinker *shrink,
--
2.11.0