From: Muchun Song <songmuchun@bytedance.com>
To: guro@fb.com, hannes@cmpxchg.org, mhocko@kernel.org,
	akpm@linux-foundation.org, shakeelb@google.com,
	vdavydov.dev@gmail.com
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	duanxiongchun@bytedance.com, fam.zheng@bytedance.com,
	bsingharora@gmail.com, shy828301@gmail.com, alexs@kernel.org,
	smuchun@gmail.com, zhengqi.arch@bytedance.com,
	Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v2 06/13] mm: thp: introduce split_queue_lock/unlock{_irqsave}()
Date: Thu, 16 Sep 2021 21:47:41 +0800
Message-ID: <20210916134748.67712-7-songmuchun@bytedance.com>
In-Reply-To: <20210916134748.67712-1-songmuchun@bytedance.com>

We need to make the THP deferred split queue lock safe when LRU
pages are reparented. Similar to lock_page_lruvec{_irqsave, _irq}(),
we introduce split_queue_lock/unlock{_irqsave}() to make the
deferred split queue lock easier to reparent.

In the next patch, we will use a similar approach (just like the
lruvec lock) to make the THP deferred split queue lock safe when
the LRU pages are reparented.
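
For reference, callers of the new helpers are expected to follow
this pattern (a minimal sketch; the real conversions are in the
diff below):

	struct deferred_split *ds_queue;
	unsigned long flags;

	/* Look up the page's split queue (memcg's or node's) and lock it. */
	ds_queue = split_queue_lock_irqsave(page, &flags);
	/* ... manipulate ds_queue->split_queue and ->split_queue_len ... */
	split_queue_unlock_irqrestore(ds_queue, flags);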

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 mm/huge_memory.c | 90 +++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 67 insertions(+), 23 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5e9ef0fc261e..9d8dfa82991a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -499,25 +499,70 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_MEMCG
-static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+static inline struct mem_cgroup *split_queue_memcg(struct deferred_split *queue)
 {
-	struct mem_cgroup *memcg = page_memcg(compound_head(page));
-	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+	if (mem_cgroup_disabled())
+		return NULL;
+	return container_of(queue, struct mem_cgroup, deferred_split_queue);
+}
 
-	if (memcg)
-		return &memcg->deferred_split_queue;
-	else
-		return &pgdat->deferred_split_queue;
+static inline struct deferred_split *page_memcg_split_queue(struct page *head)
+{
+	struct mem_cgroup *memcg = page_memcg(head);
+
+	return memcg ? &memcg->deferred_split_queue : NULL;
 }
 #else
-static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+static inline struct mem_cgroup *split_queue_memcg(struct deferred_split *queue)
 {
-	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+	return NULL;
+}
 
-	return &pgdat->deferred_split_queue;
+static inline struct deferred_split *page_memcg_split_queue(struct page *head)
+{
+	return NULL;
 }
 #endif
 
+static struct deferred_split *page_split_queue(struct page *head)
+{
+	struct deferred_split *queue = page_memcg_split_queue(head);
+
+	return queue ? : &NODE_DATA(page_to_nid(head))->deferred_split_queue;
+}
+
+static struct deferred_split *split_queue_lock(struct page *head)
+{
+	struct deferred_split *queue;
+
+	queue = page_split_queue(head);
+	spin_lock(&queue->split_queue_lock);
+
+	return queue;
+}
+
+static struct deferred_split *
+split_queue_lock_irqsave(struct page *head, unsigned long *flags)
+{
+	struct deferred_split *queue;
+
+	queue = page_split_queue(head);
+	spin_lock_irqsave(&queue->split_queue_lock, *flags);
+
+	return queue;
+}
+
+static inline void split_queue_unlock(struct deferred_split *queue)
+{
+	spin_unlock(&queue->split_queue_lock);
+}
+
+static inline void split_queue_unlock_irqrestore(struct deferred_split *queue,
+						 unsigned long flags)
+{
+	spin_unlock_irqrestore(&queue->split_queue_lock, flags);
+}
+
 void prep_transhuge_page(struct page *page)
 {
 	/*
@@ -2610,7 +2655,7 @@ bool can_split_huge_page(struct page *page, int *pextra_pins)
 int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
 	struct page *head = compound_head(page);
-	struct deferred_split *ds_queue = get_deferred_split_queue(head);
+	struct deferred_split *ds_queue;
 	struct anon_vma *anon_vma = NULL;
 	struct address_space *mapping = NULL;
 	int extra_pins, ret;
@@ -2690,13 +2735,13 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	}
 
 	/* Prevent deferred_split_scan() touching ->_refcount */
-	spin_lock(&ds_queue->split_queue_lock);
+	ds_queue = split_queue_lock(head);
 	if (page_ref_freeze(head, 1 + extra_pins)) {
 		if (!list_empty(page_deferred_list(head))) {
 			ds_queue->split_queue_len--;
 			list_del(page_deferred_list(head));
 		}
-		spin_unlock(&ds_queue->split_queue_lock);
+		split_queue_unlock(ds_queue);
 		if (mapping) {
 			int nr = thp_nr_pages(head);
 
@@ -2711,7 +2756,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		__split_huge_page(page, list, end);
 		ret = 0;
 	} else {
-		spin_unlock(&ds_queue->split_queue_lock);
+		split_queue_unlock(ds_queue);
 fail:
 		if (mapping)
 			xa_unlock(&mapping->i_pages);
@@ -2734,24 +2779,22 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
 void free_transhuge_page(struct page *page)
 {
-	struct deferred_split *ds_queue = get_deferred_split_queue(page);
+	struct deferred_split *ds_queue;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	ds_queue = split_queue_lock_irqsave(page, &flags);
 	if (!list_empty(page_deferred_list(page))) {
 		ds_queue->split_queue_len--;
 		list_del(page_deferred_list(page));
 	}
-	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+	split_queue_unlock_irqrestore(ds_queue, flags);
 	free_compound_page(page);
 }
 
 void deferred_split_huge_page(struct page *page)
 {
-	struct deferred_split *ds_queue = get_deferred_split_queue(page);
-#ifdef CONFIG_MEMCG
-	struct mem_cgroup *memcg = page_memcg(compound_head(page));
-#endif
+	struct deferred_split *ds_queue;
+	struct mem_cgroup __maybe_unused *memcg;
 	unsigned long flags;
 
 	VM_BUG_ON_PAGE(!PageTransHuge(page), page);
@@ -2769,7 +2812,8 @@ void deferred_split_huge_page(struct page *page)
 	if (PageSwapCache(page))
 		return;
 
-	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	ds_queue = split_queue_lock_irqsave(page, &flags);
+	memcg = split_queue_memcg(ds_queue);
 	if (list_empty(page_deferred_list(page))) {
 		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
 		list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
@@ -2780,7 +2824,7 @@ void deferred_split_huge_page(struct page *page)
 					 deferred_split_shrinker.id);
 #endif
 	}
-	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+	split_queue_unlock_irqrestore(ds_queue, flags);
 }
 
 static unsigned long deferred_split_count(struct shrinker *shrink,
-- 
2.11.0


Thread overview: 23+ messages
2021-09-16 13:47 [PATCH v2 00/13] Use obj_cgroup APIs to charge the LRU pages Muchun Song
2021-09-16 13:47 ` [PATCH v2 01/13] mm: move mem_cgroup_kmem_disabled() to memcontrol.h Muchun Song
2021-09-16 13:47 ` [PATCH v2 02/13] mm: memcontrol: prepare objcg API for non-kmem usage Muchun Song
2021-09-17 17:40   ` kernel test robot
2021-09-16 13:47 ` [PATCH v2 03/13] mm: memcontrol: introduce compact_lock_page_irqsave Muchun Song
2021-09-16 13:47 ` [PATCH v2 04/13] mm: memcontrol: make lruvec lock safe when the LRU pages reparented Muchun Song
2021-09-16 13:47 ` [PATCH v2 05/13] mm: vmscan: rework move_pages_to_lru() Muchun Song
2021-09-16 13:47 ` Muchun Song [this message]
2021-09-17  2:43   ` [PATCH v2 06/13] mm: thp: introduce split_queue_lock/unlock{_irqsave}() kernel test robot
2021-09-17 17:07   ` kernel test robot
2021-09-16 13:47 ` [PATCH v2 07/13] mm: thp: make split queue lock safe when LRU pages reparented Muchun Song
2021-09-17  6:38   ` kernel test robot
2021-09-16 13:47 ` [PATCH v2 08/13] mm: memcontrol: make all the callers of page_memcg() safe Muchun Song
2021-09-16 13:47 ` [PATCH v2 09/13] mm: memcontrol: introduce memcg_reparent_ops Muchun Song
2021-09-16 13:47 ` [PATCH v2 10/13] mm: memcontrol: use obj_cgroup APIs to charge the LRU pages Muchun Song
2021-09-17 16:31   ` kernel test robot
2021-09-16 13:47 ` [PATCH v2 11/13] mm: memcontrol: rename {un}lock_page_memcg() to {un}lock_page_objcg() Muchun Song
2021-09-16 13:47 ` [PATCH v2 12/13] mm: lru: add VM_BUG_ON_PAGE to lru maintenance function Muchun Song
2021-09-16 13:47 ` [PATCH v2 13/13] mm: lru: use lruvec lock to serialize memcg changes Muchun Song
2021-09-17  1:28 ` [PATCH v2 00/13] Use obj_cgroup APIs to charge the LRU pages Roman Gushchin
2021-09-17 10:49   ` Muchun Song
2021-09-18  0:13     ` Roman Gushchin
2021-09-18  7:55       ` Muchun Song
