From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
linux-kernel@vger.kernel.org
Subject: [PATCH 40/75] mm: Add pvmw_set_page() and pvmw_set_folio()
Date: Fri, 4 Feb 2022 19:58:17 +0000
Message-ID: <20220204195852.1751729-41-willy@infradead.org>
In-Reply-To: <20220204195852.1751729-1-willy@infradead.org>
Instead of setting the page directly in struct page_vma_mapped_walk,
use these helpers to allow us to transition to a PFN-based approach
in the next patch.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/rmap.h | 12 ++++++++++++
kernel/events/uprobes.c | 2 +-
mm/damon/paddr.c | 4 ++--
mm/ksm.c | 2 +-
mm/migrate.c | 2 +-
mm/page_idle.c | 2 +-
mm/rmap.c | 12 ++++++------
7 files changed, 24 insertions(+), 12 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index e704b1a4c06c..e076aca3a203 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -213,6 +213,18 @@ struct page_vma_mapped_walk {
unsigned int flags;
};
+static inline void pvmw_set_page(struct page_vma_mapped_walk *pvmw,
+ struct page *page)
+{
+ pvmw->page = page;
+}
+
+static inline void pvmw_set_folio(struct page_vma_mapped_walk *pvmw,
+ struct folio *folio)
+{
+ pvmw->page = &folio->page;
+}
+
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
/* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 6357c3580d07..5f74671b0066 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -156,13 +156,13 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
- .page = compound_head(old_page),
.vma = vma,
.address = addr,
};
int err;
struct mmu_notifier_range range;
+ pvmw_set_page(&pvmw, compound_head(old_page));
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
addr + PAGE_SIZE);
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 5e8244f65a1a..4e27d64abbb7 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -20,11 +20,11 @@ static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = addr,
};
+ pvmw_set_page(&pvmw, page);
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
if (pvmw.pte)
@@ -94,11 +94,11 @@ static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
{
struct damon_pa_access_chk_result *result = arg;
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = addr,
};
+ pvmw_set_page(&pvmw, page);
result->accessed = false;
result->page_sz = PAGE_SIZE;
while (page_vma_mapped_walk(&pvmw)) {
diff --git a/mm/ksm.c b/mm/ksm.c
index c20bd4d9a0d9..1639160c9e9a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1035,13 +1035,13 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
};
int swapped;
int err = -EFAULT;
struct mmu_notifier_range range;
+ pvmw_set_page(&pvmw, page);
pvmw.address = page_address_in_vma(page, vma);
if (pvmw.address == -EFAULT)
goto out;
diff --git a/mm/migrate.c b/mm/migrate.c
index c7da064b4781..07464fd45925 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -177,7 +177,6 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *old)
{
struct page_vma_mapped_walk pvmw = {
- .page = old,
.vma = vma,
.address = addr,
.flags = PVMW_SYNC | PVMW_MIGRATION,
@@ -187,6 +186,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
swp_entry_t entry;
VM_BUG_ON_PAGE(PageTail(page), page);
+ pvmw_set_page(&pvmw, old);
while (page_vma_mapped_walk(&pvmw)) {
if (PageKsm(page))
new = page;
diff --git a/mm/page_idle.c b/mm/page_idle.c
index edead6a8a5f9..20d35d720872 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -49,12 +49,12 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
unsigned long addr, void *arg)
{
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = addr,
};
bool referenced = false;
+ pvmw_set_page(&pvmw, page);
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
if (pvmw.pte) {
diff --git a/mm/rmap.c b/mm/rmap.c
index a531b64d53fa..fa8478372e94 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -803,12 +803,12 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
{
struct page_referenced_arg *pra = arg;
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = address,
};
int referenced = 0;
+ pvmw_set_page(&pvmw, page);
while (page_vma_mapped_walk(&pvmw)) {
address = pvmw.address;
@@ -932,7 +932,6 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = address,
.flags = PVMW_SYNC,
@@ -940,6 +939,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
struct mmu_notifier_range range;
int *cleaned = arg;
+ pvmw_set_page(&pvmw, page);
/*
* We have to assume the worse case ie pmd for invalidation. Note that
* the page can not be free from this function.
@@ -1423,7 +1423,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = address,
};
@@ -1433,6 +1432,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
+ pvmw_set_page(&pvmw, page);
/*
* When racing against e.g. zap_pte_range() on another cpu,
* in between its ptep_get_and_clear_full() and page_remove_rmap(),
@@ -1723,7 +1723,6 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = address,
};
@@ -1733,6 +1732,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
+ pvmw_set_page(&pvmw, page);
/*
* When racing against e.g. zap_pte_range() on another cpu,
* in between its ptep_get_and_clear_full() and page_remove_rmap(),
@@ -2003,11 +2003,11 @@ static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *unused)
{
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = address,
};
+ pvmw_set_page(&pvmw, page);
/* An un-locked vma doesn't have any pages to lock, continue the scan */
if (!(vma->vm_flags & VM_LOCKED))
return true;
@@ -2078,7 +2078,6 @@ static bool page_make_device_exclusive_one(struct page *page,
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = address,
};
@@ -2090,6 +2089,7 @@ static bool page_make_device_exclusive_one(struct page *page,
swp_entry_t entry;
pte_t swp_pte;
+ pvmw_set_page(&pvmw, page);
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
vma->vm_mm, address, min(vma->vm_end,
address + page_size(page)), args->owner);
--
2.34.1