From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: akpm@linux-foundation.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
linux-fsdevel@vger.kernel.org, cgroups@vger.kernel.org
Subject: [PATCH v13 17/18] mm/memcg: Add folio_lruvec_relock_irq() and folio_lruvec_relock_irqsave()
Date: Mon, 12 Jul 2021 20:45:50 +0100
Message-ID: <20210712194551.91920-18-willy@infradead.org>
In-Reply-To: <20210712194551.91920-1-willy@infradead.org>
These are the folio equivalents of relock_page_lruvec_irq() and
relock_page_lruvec_irqsave(). Also convert page_matches_lruvec()
to folio_matches_lruvec().
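For reference, a minimal sketch of the caller pattern these helpers
serve, modelled on the pagevec loops converted below. This is
illustrative only and not part of the patch; it assumes an
already-filled struct pagevec *pvec:

	/*
	 * Illustrative walk over a pagevec: the relock helpers only drop
	 * and retake the lru_lock when a folio belongs to a different
	 * lruvec than the one currently held.
	 */
	struct lruvec *lruvec = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct folio *folio = page_folio(pvec->pages[i]);

		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
		/* ... operate on the folio under the lruvec lock ... */
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);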
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/memcontrol.h | 17 ++++++++---------
 mm/mlock.c                 |  3 ++-
 mm/swap.c                  | 11 +++++++----
 mm/vmscan.c                |  5 +++--
 4 files changed, 20 insertions(+), 16 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 57b1bf457f51..469e5473bdfb 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1529,19 +1529,19 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
}
/* Test requires a stable page->memcg binding, see page_memcg() */
-static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
+static inline bool folio_matches_lruvec(struct folio *folio,
+ struct lruvec *lruvec)
{
- return lruvec_pgdat(lruvec) == page_pgdat(page) &&
- lruvec_memcg(lruvec) == page_memcg(page);
+ return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
+ lruvec_memcg(lruvec) == folio_memcg(folio);
}
/* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
+static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
struct lruvec *locked_lruvec)
{
- struct folio *folio = page_folio(page);
if (locked_lruvec) {
- if (page_matches_lruvec(page, locked_lruvec))
+ if (folio_matches_lruvec(folio, locked_lruvec))
return locked_lruvec;
unlock_page_lruvec_irq(locked_lruvec);
@@ -1551,12 +1551,11 @@ static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
}
/* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
+static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
struct lruvec *locked_lruvec, unsigned long *flags)
{
- struct folio *folio = page_folio(page);
if (locked_lruvec) {
- if (page_matches_lruvec(page, locked_lruvec))
+ if (folio_matches_lruvec(folio, locked_lruvec))
return locked_lruvec;
unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
diff --git a/mm/mlock.c b/mm/mlock.c
index 16d2ee160d43..e263d62ae2d0 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -271,6 +271,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
/* Phase 1: page isolation */
for (i = 0; i < nr; i++) {
struct page *page = pvec->pages[i];
+ struct folio *folio = page_folio(page);
if (TestClearPageMlocked(page)) {
/*
@@ -278,7 +279,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
* so we can spare the get_page() here.
*/
if (TestClearPageLRU(page)) {
- lruvec = relock_page_lruvec_irq(page, lruvec);
+ lruvec = folio_lruvec_relock_irq(folio, lruvec);
del_page_from_lru_list(page, lruvec);
continue;
} else
diff --git a/mm/swap.c b/mm/swap.c
index a82812caf409..42222653e6ef 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -211,12 +211,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
+ struct folio *folio = page_folio(page);
/* block memcg migration during page moving between lru */
if (!TestClearPageLRU(page))
continue;
- lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
+ lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
(*move_fn)(page, lruvec);
SetPageLRU(page);
@@ -907,6 +908,7 @@ void release_pages(struct page **pages, int nr)
for (i = 0; i < nr; i++) {
struct page *page = pages[i];
+ struct folio *folio = page_folio(page);
/*
* Make sure the IRQ-safe lock-holding time does not get
@@ -918,7 +920,7 @@ void release_pages(struct page **pages, int nr)
lruvec = NULL;
}
- page = compound_head(page);
+ page = &folio->page;
if (is_huge_zero_page(page))
continue;
@@ -957,7 +959,7 @@ void release_pages(struct page **pages, int nr)
if (PageLRU(page)) {
struct lruvec *prev_lruvec = lruvec;
- lruvec = relock_page_lruvec_irqsave(page, lruvec,
+ lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
&flags);
if (prev_lruvec != lruvec)
lock_batch = 0;
@@ -1061,8 +1063,9 @@ void __pagevec_lru_add(struct pagevec *pvec)
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
+ struct folio *folio = page_folio(page);
- lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
+ lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
__pagevec_lru_add_fn(page, lruvec);
}
if (lruvec)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0d48306d37dc..7a2f25b904d9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2075,7 +2075,7 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
* All pages were isolated from the same lruvec (and isolation
* inhibits memcg migration).
*/
- VM_BUG_ON_PAGE(!page_matches_lruvec(page, lruvec), page);
+ VM_BUG_ON_PAGE(!folio_matches_lruvec(page_folio(page), lruvec), page);
add_page_to_lru_list(page, lruvec);
nr_pages = thp_nr_pages(page);
nr_moved += nr_pages;
@@ -4514,6 +4514,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
for (i = 0; i < pvec->nr; i++) {
struct page *page = pvec->pages[i];
+ struct folio *folio = page_folio(page);
int nr_pages;
if (PageTransTail(page))
@@ -4526,7 +4527,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
if (!TestClearPageLRU(page))
continue;
- lruvec = relock_page_lruvec_irq(page, lruvec);
+ lruvec = folio_lruvec_relock_irq(folio, lruvec);
if (page_evictable(page) && PageUnevictable(page)) {
del_page_from_lru_list(page, lruvec);
ClearPageUnevictable(page);
--
2.30.2