From: Yin Fengwei <fengwei.yin@intel.com>
To: linux-mm@kvack.org, akpm@linux-foundation.org,
willy@infradead.org, sidhartha.kumar@oracle.com,
mike.kravetz@oracle.com, jane.chu@oracle.com,
naoya.horiguchi@nec.com
Cc: fengwei.yin@intel.com
Subject: [PATCH v2 3/5] rmap: cleanup exit path of try_to_unmap_one_page()
Date: Tue, 28 Feb 2023 20:23:06 +0800
Message-ID: <20230228122308.2972219-4-fengwei.yin@intel.com>
In-Reply-To: <20230228122308.2972219-1-fengwei.yin@intel.com>
Clean up the exit path of try_to_unmap_one_page() by removing
the duplicated code.
Move page_vma_mapped_walk_done() back to try_to_unmap_one().
Rename subpage to page, as a folio has no concept of a subpage.
Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
---
mm/rmap.c | 74 ++++++++++++++++++++++---------------------------------
1 file changed, 30 insertions(+), 44 deletions(-)
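
For reviewers, a rough sketch of the exit-path structure this change
converges on. Names are taken from the diff below; everything else is
elided, so treat it as illustrative rather than the exact resulting code:

    static bool try_to_unmap_one_page(struct folio *folio, ...)
    {
            ...
            /*
             * Every successful unmap path falls through to one
             * accounting point instead of each branch doing its own
             * dec_mm_counter().
             */
    discard:
            dec_mm_counter(vma->vm_mm, mm_counter(&folio->page));
            return true;

    exit_restore_pte:
            /* Failure after the PTE was cleared: put the old PTE back. */
            set_pte_at(mm, address, pvmw.pte, pteval);
    exit:
            /* page_vma_mapped_walk_done() is now the caller's job. */
            return false;
    }

and in the caller, try_to_unmap_one():

            ret = try_to_unmap_one_page(folio, vma, range, pvmw,
                                        address, flags);
            if (!ret) {
                    page_vma_mapped_walk_done(&pvmw);
                    break;
            }
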
diff --git a/mm/rmap.c b/mm/rmap.c
index 987ab402392f..d243e557c6e4 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1530,7 +1530,7 @@ static bool try_to_unmap_one_hugetlb(struct folio *folio,
*
* See Documentation/mm/mmu_notifier.rst
*/
- page_remove_rmap(&folio->page, vma, folio_test_hugetlb(folio));
+ page_remove_rmap(&folio->page, vma, true);
/* No VM_LOCKED set in vma->vm_flags for hugetlb. So not
* necessary to call mlock_drain_local().
*/
@@ -1545,15 +1545,13 @@ static bool try_to_unmap_one_page(struct folio *folio,
struct page_vma_mapped_walk pvmw, unsigned long address,
enum ttu_flags flags)
{
- bool anon_exclusive, ret = true;
- struct page *subpage;
+ bool anon_exclusive;
+ struct page *page;
struct mm_struct *mm = vma->vm_mm;
pte_t pteval;
- subpage = folio_page(folio,
- pte_pfn(*pvmw.pte) - folio_pfn(folio));
- anon_exclusive = folio_test_anon(folio) &&
- PageAnonExclusive(subpage);
+ page = folio_page(folio, pte_pfn(*pvmw.pte) - folio_pfn(folio));
+ anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
/* Nuke the page table entry. */
@@ -1581,15 +1579,14 @@ static bool try_to_unmap_one_page(struct folio *folio,
pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
/* Set the dirty flag on the folio now the pte is gone. */
- if (pte_dirty(pteval))
+ if (pte_dirty(pteval) && !folio_test_dirty(folio))
folio_mark_dirty(folio);
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
- if (PageHWPoison(subpage) && !(flags & TTU_HWPOISON)) {
- pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
- dec_mm_counter(mm, mm_counter(&folio->page));
+ if (PageHWPoison(page) && !(flags & TTU_HWPOISON)) {
+ pteval = swp_entry_to_pte(make_hwpoison_entry(page));
set_pte_at(mm, address, pvmw.pte, pteval);
} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
/*
@@ -1602,12 +1599,11 @@ static bool try_to_unmap_one_page(struct folio *folio,
* migration) will not expect userfaults on already
* copied pages.
*/
- dec_mm_counter(mm, mm_counter(&folio->page));
/* We have to invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm, address,
address + PAGE_SIZE);
} else if (folio_test_anon(folio)) {
- swp_entry_t entry = { .val = page_private(subpage) };
+ swp_entry_t entry = { .val = page_private(page) };
pte_t swp_pte;
/*
* Store the swap location in the pte.
@@ -1616,12 +1612,10 @@ static bool try_to_unmap_one_page(struct folio *folio,
if (unlikely(folio_test_swapbacked(folio) !=
folio_test_swapcache(folio))) {
WARN_ON_ONCE(1);
- ret = false;
/* We have to invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm, address,
address + PAGE_SIZE);
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
+ goto exit;
}
/* MADV_FREE page check */
@@ -1653,7 +1647,6 @@ static bool try_to_unmap_one_page(struct folio *folio,
/* Invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm,
address, address + PAGE_SIZE);
- dec_mm_counter(mm, MM_ANONPAGES);
goto discard;
}
@@ -1661,43 +1654,30 @@ static bool try_to_unmap_one_page(struct folio *folio,
* If the folio was redirtied, it cannot be
* discarded. Remap the page to page table.
*/
- set_pte_at(mm, address, pvmw.pte, pteval);
folio_set_swapbacked(folio);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
+ goto exit_restore_pte;
}
- if (swap_duplicate(entry) < 0) {
- set_pte_at(mm, address, pvmw.pte, pteval);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
- }
+ if (swap_duplicate(entry) < 0)
+ goto exit_restore_pte;
+
if (arch_unmap_one(mm, vma, address, pteval) < 0) {
swap_free(entry);
- set_pte_at(mm, address, pvmw.pte, pteval);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
+ goto exit_restore_pte;
}
/* See page_try_share_anon_rmap(): clear PTE first. */
- if (anon_exclusive &&
- page_try_share_anon_rmap(subpage)) {
+ if (anon_exclusive && page_try_share_anon_rmap(page)) {
swap_free(entry);
- set_pte_at(mm, address, pvmw.pte, pteval);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
+ goto exit_restore_pte;
}
+
if (list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
if (list_empty(&mm->mmlist))
list_add(&mm->mmlist, &init_mm.mmlist);
spin_unlock(&mmlist_lock);
}
- dec_mm_counter(mm, MM_ANONPAGES);
inc_mm_counter(mm, MM_SWAPENTS);
swp_pte = swp_entry_to_pte(entry);
if (anon_exclusive)
@@ -1708,8 +1688,7 @@ static bool try_to_unmap_one_page(struct folio *folio,
swp_pte = pte_swp_mkuffd_wp(swp_pte);
set_pte_at(mm, address, pvmw.pte, swp_pte);
/* Invalidate as we cleared the pte */
- mmu_notifier_invalidate_range(mm, address,
- address + PAGE_SIZE);
+ mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE);
} else {
/*
* This is a locked file-backed folio,
@@ -1722,11 +1701,16 @@ static bool try_to_unmap_one_page(struct folio *folio,
*
* See Documentation/mm/mmu_notifier.rst
*/
- dec_mm_counter(mm, mm_counter_file(&folio->page));
}
discard:
- return ret;
+ dec_mm_counter(vma->vm_mm, mm_counter(&folio->page));
+ return true;
+
+exit_restore_pte:
+ set_pte_at(mm, address, pvmw.pte, pteval);
+exit:
+ return false;
}
/*
@@ -1804,8 +1788,10 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
pte_pfn(*pvmw.pte) - folio_pfn(folio));
ret = try_to_unmap_one_page(folio, vma,
range, pvmw, address, flags);
- if (!ret)
+ if (!ret) {
+ page_vma_mapped_walk_done(&pvmw);
break;
+ }
/*
* No need to call mmu_notifier_invalidate_range() it has be
@@ -1814,7 +1800,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
*
* See Documentation/mm/mmu_notifier.rst
*/
- page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
+ page_remove_rmap(subpage, vma, false);
if (vma->vm_flags & VM_LOCKED)
mlock_drain_local();
folio_put(folio);
--
2.30.2