From: Pasha Tatashin <pasha.tatashin@soleen.com>
To: pasha.tatashin@soleen.com, linux-kernel@vger.kernel.org,
linux-mm@kvack.org, linux-doc@vger.kernel.org,
akpm@linux-foundation.org, rientjes@google.com, pjt@google.com,
weixugc@google.com, gthelen@google.com, mingo@redhat.com,
corbet@lwn.net, will@kernel.org, rppt@kernel.org,
keescook@chromium.org, tglx@linutronix.de, peterz@infradead.org,
masahiroy@kernel.org, samitolvanen@google.com,
dave.hansen@linux.intel.com, x86@kernel.org, frederic@kernel.org,
hpa@zytor.com, aneesh.kumar@linux.ibm.com, jirislaby@kernel.org,
songmuchun@bytedance.com, qydwhotmail@gmail.com,
hughd@google.com
Subject: [PATCH v3 1/4] mm: change page type prior to adding page table entry
Date: Tue, 21 Dec 2021 15:46:47 +0000
Message-ID: <20211221154650.1047963-2-pasha.tatashin@soleen.com>
In-Reply-To: <20211221154650.1047963-1-pasha.tatashin@soleen.com>

There are a few places where we first update the entry in the user page
table, and only later change the struct page to indicate that this is an
anonymous or file-backed page.

In most places, however, we first configure the page metadata and then
insert entries into the page table. Page table check will use the
information from struct page to verify that the correct type of entry is
inserted.

Change the order in all places to first update struct page, and only
later to update the page table.

This means that we first do the calls that may change the type of the
page (anon or file); a simplified sketch of the resulting ordering
follows the two lists below:

  page_move_anon_rmap
  page_add_anon_rmap
  do_page_add_anon_rmap
  page_add_new_anon_rmap
  page_add_file_rmap
  hugepage_add_anon_rmap
  hugepage_add_new_anon_rmap

And only after that do the calls that add entries to the page table:

  set_huge_pte_at
  set_pte_at
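
For illustration only, the ordering after this change follows the shape
of the sketch below. It is a simplified composite of the call sites
touched by this patch, not verbatim kernel code; the variable names
(page, vma, addr, ptep, pte) are placeholders:

  /*
   * First settle the type of the page in struct page, e.g. mark it
   * as a new anonymous page and put it on the LRU.
   */
  page_add_new_anon_rmap(page, vma, addr, false);
  lru_cache_add_inactive_or_unevictable(page, vma);

  /*
   * Only then install the mapping, so that page table check can
   * compare the entry being added against a struct page whose type
   * (anon vs. file) is already settled.
   */
  set_pte_at(vma->vm_mm, addr, ptep, pte);
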
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
---
mm/hugetlb.c | 6 +++---
mm/memory.c | 9 +++++----
mm/migrate.c | 5 ++---
mm/swapfile.c | 4 ++--
4 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a1baa198519a..61895cc01d09 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4684,8 +4684,8 @@ hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr
struct page *new_page)
{
__SetPageUptodate(new_page);
- set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
hugepage_add_new_anon_rmap(new_page, vma, addr);
+ set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
ClearHPageRestoreReserve(new_page);
SetHPageMigratable(new_page);
@@ -5259,10 +5259,10 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
/* Break COW */
huge_ptep_clear_flush(vma, haddr, ptep);
mmu_notifier_invalidate_range(mm, range.start, range.end);
- set_huge_pte_at(mm, haddr, ptep,
- make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page, true);
hugepage_add_new_anon_rmap(new_page, vma, haddr);
+ set_huge_pte_at(mm, haddr, ptep,
+ make_huge_pte(vma, new_page, 1));
SetHPageMigratable(new_page);
/* Make the old page be freed below */
new_page = old_page;
diff --git a/mm/memory.c b/mm/memory.c
index 71e475d440b0..11cb28a2ca54 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -720,8 +720,6 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
else if (is_writable_device_exclusive_entry(entry))
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
- set_pte_at(vma->vm_mm, address, ptep, pte);
-
/*
* No need to take a page reference as one was already
* created when the swap entry was made.
@@ -735,6 +733,8 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
*/
WARN_ON_ONCE(!PageAnon(page));
+ set_pte_at(vma->vm_mm, address, ptep, pte);
+
if (vma->vm_flags & VM_LOCKED)
mlock_vma_page(page);
@@ -3635,8 +3635,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
pte = pte_mkuffd_wp(pte);
pte = pte_wrprotect(pte);
}
- set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
- arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
vmf->orig_pte = pte;
/* ksm created a completely new copy */
@@ -3647,6 +3645,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
}
+ set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
+ arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
+
swap_free(entry);
if (mem_cgroup_swap_full(page) ||
(vma->vm_flags & VM_LOCKED) || PageMlocked(page))
diff --git a/mm/migrate.c b/mm/migrate.c
index c9296d63878d..f943a2d99de7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -237,20 +237,19 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
pte = pte_mkhuge(pte);
pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
- set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
if (PageAnon(new))
hugepage_add_anon_rmap(new, vma, pvmw.address);
else
page_dup_rmap(new, true);
+ set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
} else
#endif
{
- set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
-
if (PageAnon(new))
page_add_anon_rmap(new, vma, pvmw.address, false);
else
page_add_file_rmap(new, false);
+ set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
}
if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
mlock_vma_page(new);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e59e08ef46e1..e64207e2ef1d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1917,14 +1917,14 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
get_page(page);
- set_pte_at(vma->vm_mm, addr, pte,
- pte_mkold(mk_pte(page, vma->vm_page_prot)));
if (page == swapcache) {
page_add_anon_rmap(page, vma, addr, false);
} else { /* ksm created a completely new copy */
page_add_new_anon_rmap(page, vma, addr, false);
lru_cache_add_inactive_or_unevictable(page, vma);
}
+ set_pte_at(vma->vm_mm, addr, pte,
+ pte_mkold(mk_pte(page, vma->vm_page_prot)));
swap_free(entry);
out:
pte_unmap_unlock(pte, ptl);
--
2.34.1.307.g9b7440fafd-goog