From: Muchun Song <songmuchun@bytedance.com>
To: hannes@cmpxchg.org, mhocko@kernel.org, roman.gushchin@linux.dev,
shakeelb@google.com, akpm@linux-foundation.org
Cc: cgroups@vger.kernel.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, duanxiongchun@bytedance.com,
longman@redhat.com, Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v5 11/11] mm: lru: use lruvec lock to serialize memcg changes
Date: Mon, 30 May 2022 15:49:19 +0800
Message-ID: <20220530074919.46352-12-songmuchun@bytedance.com>
In-Reply-To: <20220530074919.46352-1-songmuchun@bytedance.com>
As described by commit fc574c23558c ("mm/swap.c: serialize memcg
changes in pagevec_lru_move_fn"), TestClearPageLRU() aims to
serialize mem_cgroup_move_account() during pagevec_lru_move_fn().
Now folio_lruvec_lock*() can detect whether the memcg of a page
has changed, so we can rely on the lruvec lock to serialize
mem_cgroup_move_account() during pagevec_lru_move_fn(). This
change partially reverts commit fc574c23558c ("mm/swap.c:
serialize memcg changes in pagevec_lru_move_fn").

Since pagevec_lru_move_fn() is a much hotter path than
mem_cgroup_move_account(), removing the atomic TestClearPageLRU()
from it is an optimization. This change also avoids dirtying the
cacheline of a page that is not on the LRU.
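
For reference, below is a condensed sketch of the retry pattern
that makes this safe, based on folio_lruvec_lock() as modified by
this series (the _irq/_irqsave variants follow the same shape; the
exact code is in the memcontrol.c hunk below):

	struct lruvec *folio_lruvec_lock(struct folio *folio)
	{
		struct lruvec *lruvec;

		rcu_read_lock();
	retry:
		lruvec = folio_lruvec(folio);
		spin_lock(&lruvec->lru_lock);
		/* Raced with a memcg change? Retry on the new lruvec. */
		if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
			spin_unlock(&lruvec->lru_lock);
			goto retry;
		}
		/* folio_memcg() is stable while lru_lock is held. */
		rcu_read_unlock();
		return lruvec;
	}

Because mem_cgroup_move_account() now updates folio->memcg_data
under the old lruvec's lru_lock (see the memcontrol.c hunk below),
a move_fn running under folio_lruvec_lock*() is serialized against
the memcg change.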
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
mm/memcontrol.c | 34 ++++++++++++++++++++++++++++++++++
mm/swap.c | 45 ++++++++++++++-------------------------------
mm/vmscan.c | 9 ++++-----
3 files changed, 52 insertions(+), 36 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f4db3cb2aedc..3a0f3838f02d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1333,10 +1333,39 @@ struct lruvec *folio_lruvec_lock(struct folio *folio)
lruvec = folio_lruvec(folio);
spin_lock(&lruvec->lru_lock);
+ /*
+ * The memcg of the page can be changed by any of the following routines:
+ *
+ * 1) mem_cgroup_move_account() or
+ * 2) memcg_reparent_objcgs()
+ *
+ * A possible bad scenario looks like this:
+ *
+ *    CPU0:                        CPU1:                      CPU2:
+ *    lruvec = folio_lruvec()
+ *
+ *                                 if (!isolate_lru_page())
+ *                                         mem_cgroup_move_account()
+ *
+ *                                                            memcg_reparent_objcgs()
+ *
+ *    spin_lock(&lruvec->lru_lock)
+ *                ^^^^^^
+ *              wrong lock
+ *
+ * Either CPU1 or CPU2 can change the page's memcg, so we need to
+ * check whether it has changed; if so, we should retry and acquire
+ * the new lruvec's lock.
+ */
if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
spin_unlock(&lruvec->lru_lock);
goto retry;
}
+
+ /*
+ * When we reach here, folio_memcg(folio) is stable.
+ */
rcu_read_unlock();
return lruvec;
@@ -1364,6 +1393,7 @@ struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
lruvec = folio_lruvec(folio);
spin_lock_irq(&lruvec->lru_lock);
+ /* See the comments in folio_lruvec_lock(). */
if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
spin_unlock_irq(&lruvec->lru_lock);
goto retry;
@@ -1397,6 +1427,7 @@ struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
lruvec = folio_lruvec(folio);
spin_lock_irqsave(&lruvec->lru_lock, *flags);
+ /* See the comments in folio_lruvec_lock(). */
if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
spin_unlock_irqrestore(&lruvec->lru_lock, *flags);
goto retry;
@@ -5738,7 +5769,10 @@ static int mem_cgroup_move_account(struct page *page,
obj_cgroup_put(rcu_dereference(from->objcg));
rcu_read_unlock();
+ /* See the comments in folio_lruvec_lock(). */
+ spin_lock(&from_vec->lru_lock);
folio->memcg_data = (unsigned long)rcu_access_pointer(to->objcg);
+ spin_unlock(&from_vec->lru_lock);
__folio_memcg_unlock(from);
diff --git a/mm/swap.c b/mm/swap.c
index 6cea469b6ff2..1b893c157bd1 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -199,14 +199,8 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
struct page *page = pvec->pages[i];
struct folio *folio = page_folio(page);
- /* block memcg migration during page moving between lru */
- if (!TestClearPageLRU(page))
- continue;
-
lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
(*move_fn)(page, lruvec);
-
- SetPageLRU(page);
}
if (lruvec)
lruvec_unlock_irqrestore(lruvec, flags);
@@ -218,7 +212,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
{
struct folio *folio = page_folio(page);
- if (!folio_test_unevictable(folio)) {
+ if (folio_test_lru(folio) && !folio_test_unevictable(folio)) {
lruvec_del_folio(lruvec, folio);
folio_clear_active(folio);
lruvec_add_folio_tail(lruvec, folio);
@@ -314,7 +308,8 @@ void lru_note_cost_folio(struct folio *folio)
static void __folio_activate(struct folio *folio, struct lruvec *lruvec)
{
- if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
+ if (folio_test_lru(folio) && !folio_test_active(folio) &&
+ !folio_test_unevictable(folio)) {
long nr_pages = folio_nr_pages(folio);
lruvec_del_folio(lruvec, folio);
@@ -371,12 +366,9 @@ static void folio_activate(struct folio *folio)
{
struct lruvec *lruvec;
- if (folio_test_clear_lru(folio)) {
- lruvec = folio_lruvec_lock_irq(folio);
- __folio_activate(folio, lruvec);
- lruvec_unlock_irq(lruvec);
- folio_set_lru(folio);
- }
+ lruvec = folio_lruvec_lock_irq(folio);
+ __folio_activate(folio, lruvec);
+ lruvec_unlock_irq(lruvec);
}
#endif
@@ -519,6 +511,9 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
bool active = PageActive(page);
int nr_pages = thp_nr_pages(page);
+ if (!PageLRU(page))
+ return;
+
if (PageUnevictable(page))
return;
@@ -556,7 +551,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
{
- if (PageActive(page) && !PageUnevictable(page)) {
+ if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
int nr_pages = thp_nr_pages(page);
del_page_from_lru_list(page, lruvec);
@@ -572,7 +567,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
{
- if (PageAnon(page) && PageSwapBacked(page) &&
+ if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
!PageSwapCache(page) && !PageUnevictable(page)) {
int nr_pages = thp_nr_pages(page);
@@ -1007,8 +1002,9 @@ void __pagevec_release(struct pagevec *pvec)
}
EXPORT_SYMBOL(__pagevec_release);
-static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
+static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
{
+ struct folio *folio = page_folio(page);
int was_unevictable = folio_test_clear_unevictable(folio);
long nr_pages = folio_nr_pages(folio);
@@ -1054,20 +1050,7 @@ static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
*/
void __pagevec_lru_add(struct pagevec *pvec)
{
- int i;
- struct lruvec *lruvec = NULL;
- unsigned long flags = 0;
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct folio *folio = page_folio(pvec->pages[i]);
-
- lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
- __pagevec_lru_add_fn(folio, lruvec);
- }
- if (lruvec)
- lruvec_unlock_irqrestore(lruvec, flags);
- release_pages(pvec->pages, pvec->nr);
- pagevec_reinit(pvec);
+ pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn);
}
/**
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 51853d6df7b4..c591d071a598 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4789,18 +4789,17 @@ void check_move_unevictable_pages(struct pagevec *pvec)
nr_pages = thp_nr_pages(page);
pgscanned += nr_pages;
- /* block memcg migration during page moving between lru */
- if (!TestClearPageLRU(page))
+ lruvec = folio_lruvec_relock_irq(folio, lruvec);
+
+ if (!PageLRU(page) || !PageUnevictable(page))
continue;
- lruvec = folio_lruvec_relock_irq(folio, lruvec);
- if (page_evictable(page) && PageUnevictable(page)) {
+ if (page_evictable(page)) {
del_page_from_lru_list(page, lruvec);
ClearPageUnevictable(page);
add_page_to_lru_list(page, lruvec);
pgrescued += nr_pages;
}
- SetPageLRU(page);
}
if (lruvec) {
--
2.11.0