From: Anthony Yznaga <anthony.yznaga@oracle.com>
To: akpm@linux-foundation.org, willy@infradead.org,
markhemm@googlemail.com, viro@zeniv.linux.org.uk,
david@redhat.com, khalid@kernel.org
Cc: anthony.yznaga@oracle.com, andreyknvl@gmail.com,
dave.hansen@intel.com, luto@kernel.org, brauner@kernel.org,
arnd@arndb.de, ebiederm@xmission.com, catalin.marinas@arm.com,
linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-mm@kvack.org, mhiramat@kernel.org, rostedt@goodmis.org,
vasily.averin@linux.dev, xhao@linux.alibaba.com, pcc@google.com,
neilb@suse.de, maz@kernel.org
Subject: [PATCH v2 12/20] mm/mshare: prepare for page table sharing support
Date: Thu, 3 Apr 2025 19:18:54 -0700
Message-ID: <20250404021902.48863-13-anthony.yznaga@oracle.com>
In-Reply-To: <20250404021902.48863-1-anthony.yznaga@oracle.com>
From: Khalid Aziz <khalid@kernel.org>
In preparation for enabling the handling of page faults in an mshare
region, provide a way to link an mshare shared page table into a process
page table and a way to find the actual vma needed to handle a page
fault. Modify the unmap path to ensure that page tables in mshare regions
are unlinked but kept intact when a process exits or an mshare region
is explicitly unmapped.
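The arch fault-handler hook that consumes find_shared_vma() is added
later in this series. As an illustration only, with a hypothetical name
(mshare_redirect_fault() is not part of this patch), a caller is
expected to look roughly like this:

	/*
	 * Illustrative sketch, not part of this patch: redirect a fault
	 * on an mshare vma to the host mm before calling
	 * handle_mm_fault().
	 */
	static vm_fault_t mshare_redirect_fault(struct vm_area_struct *vma,
						unsigned long address,
						unsigned int flags,
						struct pt_regs *regs)
	{
		struct mm_struct *host_mm;
		vm_fault_t ret;

		/* Link the shared p4d or translate to the host vma/address. */
		ret = find_shared_vma(&vma, &address);
		if (ret)
			return ret;	/* VM_FAULT_NOPAGE: shared tables linked */
		if (!vma)
			return VM_FAULT_SIGBUS;	/* no host vma; lock already dropped */

		/*
		 * find_shared_vma() returned with the host mm read lock
		 * held; vma and address now refer to the host mm.
		 */
		host_mm = vma->vm_mm;
		ret = handle_mm_fault(vma, address, flags, regs);
		mmap_read_unlock(host_mm);
		return ret;
	}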
Signed-off-by: Khalid Aziz <khalid@kernel.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
---
include/linux/mm.h | 6 +++++
mm/memory.c | 45 +++++++++++++++++++++++++++------
mm/mshare.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 105 insertions(+), 8 deletions(-)
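One detail worth calling out before the diff: find_shared_vma() below
translates the faulting address from the process ("guest") mapping to
the host mm, assuming, as set up earlier in the series, that the host
mm maps the region starting at its mmap_base. A worked example with
made-up addresses:

	/*
	 * Illustrative numbers only: the process maps the mshare region
	 * at 0x7f0000000000 and the host mm placed it at its mmap_base,
	 * here 0x555500000000. A fault at 0x7f0000001000 becomes:
	 *
	 *   host_addr = fault_addr - guest->vm_start + host_mm->mmap_base
	 *             = 0x7f0000001000 - 0x7f0000000000 + 0x555500000000
	 *             = 0x555500001000
	 */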
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d6cac2cca4da..f06be2f20c20 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1179,11 +1179,17 @@ static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false;
int vma_is_stack_for_current(struct vm_area_struct *vma);
#ifdef CONFIG_MSHARE
+vm_fault_t find_shared_vma(struct vm_area_struct **vma, unsigned long *addrp);
static inline bool vma_is_mshare(const struct vm_area_struct *vma)
{
return vma->vm_flags & VM_MSHARE;
}
#else
+static inline vm_fault_t find_shared_vma(struct vm_area_struct **vma, unsigned long *addrp)
+{
+ WARN_ON_ONCE(1);
+ return VM_FAULT_SIGBUS;
+}
static inline bool vma_is_mshare(const struct vm_area_struct *vma)
{
return false;
diff --git a/mm/memory.c b/mm/memory.c
index db558fe43088..68422b606819 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -247,7 +247,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
+ unsigned long floor, unsigned long ceiling,
+ bool shared_pud)
{
p4d_t *p4d;
unsigned long next;
@@ -259,7 +260,10 @@ static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
next = p4d_addr_end(addr, end);
if (p4d_none_or_clear_bad(p4d))
continue;
- free_pud_range(tlb, p4d, addr, next, floor, ceiling);
+ if (unlikely(shared_pud))
+ p4d_clear(p4d);
+ else
+ free_pud_range(tlb, p4d, addr, next, floor, ceiling);
} while (p4d++, addr = next, addr != end);
start &= PGDIR_MASK;
@@ -281,9 +285,10 @@ static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
/*
* This function frees user-level page tables of a process.
*/
-void free_pgd_range(struct mmu_gather *tlb,
+static void __free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
+ unsigned long floor, unsigned long ceiling,
+ bool shared_pud)
{
pgd_t *pgd;
unsigned long next;
@@ -339,10 +344,17 @@ void free_pgd_range(struct mmu_gather *tlb,
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
+ free_p4d_range(tlb, pgd, addr, next, floor, ceiling, shared_pud);
} while (pgd++, addr = next, addr != end);
}
+void free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ __free_pgd_range(tlb, addr, end, floor, ceiling, false);
+}
+
void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *vma, unsigned long floor,
unsigned long ceiling, bool mm_wr_locked)
@@ -379,9 +391,12 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
/*
* Optimization: gather nearby vmas into one call down
+ *
+ * Do not free the shared page tables of an mshare region.
*/
while (next && next->vm_start <= vma->vm_end + PMD_SIZE
- && !is_vm_hugetlb_page(next)) {
+ && !is_vm_hugetlb_page(next)
+ && !vma_is_mshare(next)) {
vma = next;
next = mas_find(mas, ceiling - 1);
if (unlikely(xa_is_zero(next)))
@@ -392,9 +407,11 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
unlink_file_vma_batch_add(&vb, vma);
}
unlink_file_vma_batch_final(&vb);
- free_pgd_range(tlb, addr, vma->vm_end,
- floor, next ? next->vm_start : ceiling);
+ __free_pgd_range(tlb, addr, vma->vm_end,
+ floor, next ? next->vm_start : ceiling,
+ vma_is_mshare(vma));
}
+
vma = next;
} while (vma);
}
@@ -1884,6 +1901,13 @@ void __unmap_page_range(struct mmu_gather *tlb,
pgd_t *pgd;
unsigned long next;
+ /*
+ * Do not unmap vmas that share page tables through an
+ * mshare region.
+ */
+ if (vma_is_mshare(vma))
+ return;
+
BUG_ON(addr >= end);
tlb_start_vma(tlb, vma);
pgd = pgd_offset(vma->vm_mm, addr);
@@ -6275,6 +6299,11 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
if (ret)
goto out;
+ if (unlikely(vma_is_mshare(vma))) {
+ WARN_ON_ONCE(1);
+ return VM_FAULT_SIGBUS;
+ }
+
if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
flags & FAULT_FLAG_INSTRUCTION,
flags & FAULT_FLAG_REMOTE)) {
diff --git a/mm/mshare.c b/mm/mshare.c
index 792d86c61042..4ddaa0d41070 100644
--- a/mm/mshare.c
+++ b/mm/mshare.c
@@ -44,6 +44,56 @@ static const struct mmu_notifier_ops mshare_mmu_ops = {
.arch_invalidate_secondary_tlbs = mshare_invalidate_tlbs,
};
+static p4d_t *walk_to_p4d(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+
+ pgd = pgd_offset(mm, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ if (!p4d)
+ return NULL;
+
+ return p4d;
+}
+
+/* Returns holding the host mm's read lock if a vma is found; caller must release it. */
+vm_fault_t
+find_shared_vma(struct vm_area_struct **vmap, unsigned long *addrp)
+{
+ struct vm_area_struct *vma, *guest = *vmap;
+ struct mshare_data *m_data = guest->vm_private_data;
+ struct mm_struct *host_mm = m_data->mm;
+ unsigned long host_addr;
+ p4d_t *p4d, *guest_p4d;
+
+ mmap_read_lock_nested(host_mm, SINGLE_DEPTH_NESTING);
+ host_addr = *addrp - guest->vm_start + host_mm->mmap_base;
+ p4d = walk_to_p4d(host_mm, host_addr);
+ guest_p4d = walk_to_p4d(guest->vm_mm, *addrp);
+ if (!p4d_same(*guest_p4d, *p4d)) {
+ set_p4d(guest_p4d, *p4d);
+ mmap_read_unlock(host_mm);
+ return VM_FAULT_NOPAGE;
+ }
+
+ *addrp = host_addr;
+ vma = find_vma(host_mm, host_addr);
+
+ /* XXX: expand stack? */
+ if (vma && vma->vm_start > host_addr)
+ vma = NULL;
+
+ *vmap = vma;
+
+ /*
+ * release host mm lock unless a matching vma is found
+ */
+ if (!vma)
+ mmap_read_unlock(host_mm);
+ return 0;
+}
+
static int mshare_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
{
return -EINVAL;
@@ -55,9 +105,21 @@ static int mshare_vm_op_mprotect(struct vm_area_struct *vma, unsigned long start
return -EINVAL;
}
+static void mshare_vm_op_unmap_page_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end,
+ struct zap_details *details)
+{
+ /*
+ * The msharefs vma is being unmapped. Do not unmap pages in the
+ * mshare region itself.
+ */
+}
+
static const struct vm_operations_struct msharefs_vm_ops = {
.may_split = mshare_vm_op_split,
.mprotect = mshare_vm_op_mprotect,
+ .unmap_page_range = mshare_vm_op_unmap_page_range,
};
/*
--
2.43.5