From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>, linux-mm@kvack.org
Subject: [PATCH 6/7] khugepaged: Use a folio throughout collapse_file()
Date: Wed, 3 Apr 2024 18:18:35 +0100
Message-ID: <20240403171838.1445826-7-willy@infradead.org>
In-Reply-To: <20240403171838.1445826-1-willy@infradead.org>
Pull folios from the page cache instead of pages. Half of this work
had been done already, but we were still operating on pages for a large
chunk of this function. There is no attempt in this patch to handle
large folios that are smaller than a THP; that will have to wait for a
future patch.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/khugepaged.c | 113 +++++++++++++++++++++++-------------------------
1 file changed, 54 insertions(+), 59 deletions(-)
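A quick reference for review: the page APIs this patch replaces and the
folio equivalents it uses, summarised from the hunks below (not new API,
just a digest of the conversion):

	trylock_page()          ->  folio_trylock()
	get_page() / put_page() ->  folio_get() / folio_put()
	unlock_page()           ->  folio_unlock()
	PageUptodate()          ->  folio_test_uptodate()
	PageDirty()             ->  folio_test_dirty()
	PageWriteback()         ->  folio_test_writeback()
	PageTransCompound()     ->  folio_test_large()
	find_lock_page()        ->  filemap_lock_folio()
	page_count()            ->  folio_ref_count()
	putback_lru_page()      ->  folio_putback_lru()

Note that filemap_lock_folio() reports a miss with an ERR_PTR rather
than NULL, which is why the lookup below tests IS_ERR() instead of
comparing against NULL.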
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index d44584b5e004..0b0053fb30c0 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1780,9 +1780,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
struct collapse_control *cc)
{
struct address_space *mapping = file->f_mapping;
- struct page *page;
- struct page *tmp, *dst;
- struct folio *folio, *new_folio;
+ struct page *dst;
+ struct folio *folio, *tmp, *new_folio;
pgoff_t index = 0, end = start + HPAGE_PMD_NR;
LIST_HEAD(pagelist);
XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -1820,11 +1819,11 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
for (index = start; index < end; index++) {
xas_set(&xas, index);
- page = xas_load(&xas);
+ folio = xas_load(&xas);
VM_BUG_ON(index != xas.xa_index);
if (is_shmem) {
- if (!page) {
+ if (!folio) {
/*
* Stop if extent has been truncated or
* hole-punched, and is now completely
@@ -1840,7 +1839,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
continue;
}
- if (xa_is_value(page) || !PageUptodate(page)) {
+ if (xa_is_value(folio) || !folio_test_uptodate(folio)) {
xas_unlock_irq(&xas);
/* swap in or instantiate fallocated page */
if (shmem_get_folio(mapping->host, index,
@@ -1850,28 +1849,27 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
}
/* drain lru cache to help isolate_lru_page() */
lru_add_drain();
- page = folio_file_page(folio, index);
- } else if (trylock_page(page)) {
- get_page(page);
+ } else if (folio_trylock(folio)) {
+ folio_get(folio);
xas_unlock_irq(&xas);
} else {
result = SCAN_PAGE_LOCK;
goto xa_locked;
}
} else { /* !is_shmem */
- if (!page || xa_is_value(page)) {
+ if (!folio || xa_is_value(folio)) {
xas_unlock_irq(&xas);
page_cache_sync_readahead(mapping, &file->f_ra,
file, index,
end - index);
/* drain lru cache to help isolate_lru_page() */
lru_add_drain();
- page = find_lock_page(mapping, index);
- if (unlikely(page == NULL)) {
+ folio = filemap_lock_folio(mapping, index);
+ if (IS_ERR(folio)) {
result = SCAN_FAIL;
goto xa_unlocked;
}
- } else if (PageDirty(page)) {
+ } else if (folio_test_dirty(folio)) {
/*
* khugepaged only works on read-only fd,
* so this page is dirty because it hasn't
@@ -1889,12 +1887,12 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
filemap_flush(mapping);
result = SCAN_FAIL;
goto xa_unlocked;
- } else if (PageWriteback(page)) {
+ } else if (folio_test_writeback(folio)) {
xas_unlock_irq(&xas);
result = SCAN_FAIL;
goto xa_unlocked;
- } else if (trylock_page(page)) {
- get_page(page);
+ } else if (folio_trylock(folio)) {
+ folio_get(folio);
xas_unlock_irq(&xas);
} else {
result = SCAN_PAGE_LOCK;
@@ -1903,35 +1901,31 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
}
/*
- * The page must be locked, so we can drop the i_pages lock
+ * The folio must be locked, so we can drop the i_pages lock
* without racing with truncate.
*/
- VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
- /* make sure the page is up to date */
- if (unlikely(!PageUptodate(page))) {
+ /* make sure the folio is up to date */
+ if (unlikely(!folio_test_uptodate(folio))) {
result = SCAN_FAIL;
goto out_unlock;
}
/*
* If file was truncated then extended, or hole-punched, before
- * we locked the first page, then a THP might be there already.
+ * we locked the first folio, then a THP might be there already.
* This will be discovered on the first iteration.
*/
- if (PageTransCompound(page)) {
- struct page *head = compound_head(page);
-
- result = compound_order(head) == HPAGE_PMD_ORDER &&
- head->index == start
+ if (folio_test_large(folio)) {
+ result = folio_order(folio) == HPAGE_PMD_ORDER &&
+ folio->index == start
/* Maybe PMD-mapped */
? SCAN_PTE_MAPPED_HUGEPAGE
: SCAN_PAGE_COMPOUND;
goto out_unlock;
}
- folio = page_folio(page);
-
if (folio_mapping(folio) != mapping) {
result = SCAN_TRUNCATED;
goto out_unlock;
@@ -1941,7 +1935,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
folio_test_writeback(folio))) {
/*
* khugepaged only works on read-only fd, so this
- * page is dirty because it hasn't been flushed
+ * folio is dirty because it hasn't been flushed
* since first write.
*/
result = SCAN_FAIL;
@@ -1965,33 +1959,34 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
xas_lock_irq(&xas);
- VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page);
+ VM_BUG_ON_FOLIO(folio != xa_load(xas.xa, index), folio);
/*
- * We control three references to the page:
+ * We control three references to the folio:
* - we hold a pin on it;
* - one reference from page cache;
- * - one from isolate_lru_page;
- * If those are the only references, then any new usage of the
- * page will have to fetch it from the page cache. That requires
- * locking the page to handle truncate, so any new usage will be
- * blocked until we unlock page after collapse/during rollback.
+ * - one from folio_isolate_lru;
+ * If those are the only references, then any new usage
+ * of the folio will have to fetch it from the page
+ * cache. That requires locking the folio to handle
+ * truncate, so any new usage will be blocked until we
+ * unlock folio after collapse/during rollback.
*/
- if (page_count(page) != 3) {
+ if (folio_ref_count(folio) != 3) {
result = SCAN_PAGE_COUNT;
xas_unlock_irq(&xas);
- putback_lru_page(page);
+ folio_putback_lru(folio);
goto out_unlock;
}
/*
- * Accumulate the pages that are being collapsed.
+ * Accumulate the folios that are being collapsed.
*/
- list_add_tail(&page->lru, &pagelist);
+ list_add_tail(&folio->lru, &pagelist);
continue;
out_unlock:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
goto xa_unlocked;
}
@@ -2030,17 +2025,17 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
}
/*
- * The old pages are locked, so they won't change anymore.
+ * The old folios are locked, so they won't change anymore.
*/
index = start;
dst = folio_page(new_folio, 0);
- list_for_each_entry(page, &pagelist, lru) {
- while (index < page->index) {
+ list_for_each_entry(folio, &pagelist, lru) {
+ while (index < folio->index) {
clear_highpage(dst);
index++;
dst++;
}
- if (copy_mc_highpage(dst, page) > 0) {
+ if (copy_mc_highpage(dst, folio_page(folio, 0)) > 0) {
result = SCAN_COPY_MC;
goto rollback;
}
@@ -2152,15 +2147,15 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
folio_unlock(new_folio);
/*
- * The collapse has succeeded, so free the old pages.
+ * The collapse has succeeded, so free the old folios.
*/
- list_for_each_entry_safe(page, tmp, &pagelist, lru) {
- list_del(&page->lru);
- page->mapping = NULL;
- ClearPageActive(page);
- ClearPageUnevictable(page);
- unlock_page(page);
- folio_put_refs(page_folio(page), 3);
+ list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
+ list_del(&folio->lru);
+ folio->mapping = NULL;
+ folio_clear_active(folio);
+ folio_clear_unevictable(folio);
+ folio_unlock(folio);
+ folio_put_refs(folio, 3);
}
goto out;
@@ -2174,11 +2169,11 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
shmem_uncharge(mapping->host, nr_none);
}
- list_for_each_entry_safe(page, tmp, &pagelist, lru) {
- list_del(&page->lru);
- unlock_page(page);
- putback_lru_page(page);
- put_page(page);
+ list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
+ list_del(&folio->lru);
+ folio_unlock(folio);
+ folio_putback_lru(folio);
+ folio_put(folio);
}
/*
* Undo the updates of filemap_nr_thps_inc for non-SHMEM
--
2.43.0
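Two of the hunks above lean on a reference-counting invariant that is
easy to miss: collapse may only proceed when exactly three references
to each old folio exist - our pin from folio_get(), the page cache's
reference, and the one taken by folio_isolate_lru(). A toy model of
that check (plain C with a hypothetical fake_folio type standing in
for struct folio; an illustration, not kernel code):

	#include <stdio.h>

	/* Toy stand-in for struct folio; only the refcount matters here. */
	struct fake_folio { int refcount; };

	int main(void)
	{
		struct fake_folio folio = { .refcount = 0 };

		folio.refcount++;	/* the page cache's reference */
		folio.refcount++;	/* our pin, taken with folio_get() */
		folio.refcount++;	/* folio_isolate_lru()'s reference */

		/*
		 * Mirrors "if (folio_ref_count(folio) != 3)" in the patch:
		 * any extra count means another path still holds the folio,
		 * so collapsing it out from under that user is unsafe.
		 */
		if (folio.refcount != 3) {
			puts("SCAN_PAGE_COUNT: roll back");
			return 1;
		}
		puts("only known holders remain; safe to collapse");
		return 0;
	}

On success the function drops all three references at once with
folio_put_refs(folio, 3), visible in the "free the old folios" hunk.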
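The copy loop is the other subtle piece: pagelist holds the old folios
in index order, and dst walks the new THP one subpage at a time,
zero-filling any holes before each source folio. A standalone sketch of
that walk, with plain buffers standing in for clear_highpage() and
copy_mc_highpage(), a toy NR_SUBPAGES in place of HPAGE_PMD_NR, and
order-0 sources assumed (the commit message notes that large folios
smaller than a THP are not handled yet):

	#include <stdio.h>
	#include <string.h>

	#define NR_SUBPAGES 8	/* stands in for HPAGE_PMD_NR */
	#define PAGE_SZ 16	/* toy page size */

	struct src { int index; char data[PAGE_SZ]; };

	int main(void)
	{
		/* present source pages, sorted by index; holes at 1, 3, 7 */
		struct src list[] = {
			{ 0, "zero" }, { 2, "two" }, { 4, "four" },
			{ 5, "five" }, { 6, "six" },
		};
		int n = sizeof(list) / sizeof(list[0]);
		char dst[NR_SUBPAGES][PAGE_SZ];
		int index = 0;

		for (int i = 0; i < n; i++) {
			/* zero-fill each hole, like clear_highpage(dst) */
			while (index < list[i].index)
				memset(dst[index++], 0, PAGE_SZ);
			/* copy the present page, like copy_mc_highpage() */
			memcpy(dst[index++], list[i].data, PAGE_SZ);
		}
		/*
		 * The real function likewise clears any trailing holes
		 * after the list walk (that hunk is not shown above).
		 */
		while (index < NR_SUBPAGES)
			memset(dst[index++], 0, PAGE_SZ);

		for (int i = 0; i < NR_SUBPAGES; i++)
			printf("%d: %s\n", i, dst[i][0] ? dst[i] : "(hole)");
		return 0;
	}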
Thread overview: 20+ messages
2024-04-03 17:18 [PATCH 0/7] khugepaged folio conversions Matthew Wilcox (Oracle)
2024-04-03 17:18 ` [PATCH 1/7] khugepaged: Inline hpage_collapse_alloc_folio() Matthew Wilcox (Oracle)
2024-04-05 21:13 ` Vishal Moola
2024-04-03 17:18 ` [PATCH 2/7] khugepaged: Convert alloc_charge_hpage to alloc_charge_folio Matthew Wilcox (Oracle)
2024-04-05 21:14 ` Vishal Moola
2024-04-07 3:44 ` Matthew Wilcox
2024-04-03 17:18 ` [PATCH 3/7] khugepaged: Remove hpage from collapse_huge_page() Matthew Wilcox (Oracle)
2024-04-05 21:14 ` Vishal Moola
2024-04-03 17:18 ` [PATCH 4/7] khugepaged: Pass a folio to __collapse_huge_page_copy() Matthew Wilcox (Oracle)
2024-04-05 21:19 ` Vishal Moola
2024-04-07 3:46 ` Matthew Wilcox
2024-04-03 17:18 ` [PATCH 5/7] khugepaged: Remove hpage from collapse_file() Matthew Wilcox (Oracle)
2024-04-05 21:19 ` Vishal Moola
2024-04-03 17:18 ` Matthew Wilcox (Oracle) [this message]
2024-04-05 21:20 ` [PATCH 6/7] khugepaged: Use a folio throughout collapse_file() Vishal Moola
2024-04-07 3:43 ` Matthew Wilcox
2024-04-03 17:18 ` [PATCH 7/7] khugepaged: Use a folio throughout hpage_collapse_scan_file() Matthew Wilcox (Oracle)
2024-04-05 21:21 ` Vishal Moola
2024-04-08 20:07 ` David Hildenbrand
2024-04-08 20:28 ` Matthew Wilcox