From: Anthony Yznaga <anthony.yznaga@oracle.com>
To: linux-mm@kvack.org
Cc: akpm@linux-foundation.org, andreyknvl@gmail.com, arnd@arndb.de,
bp@alien8.de, brauner@kernel.org, bsegall@google.com,
corbet@lwn.net, dave.hansen@linux.intel.com, david@redhat.com,
dietmar.eggemann@arm.com, ebiederm@xmission.com, hpa@zytor.com,
jakub.wartak@mailbox.org, jannh@google.com,
juri.lelli@redhat.com, khalid@kernel.org,
liam.howlett@oracle.com, linyongting@bytedance.com,
lorenzo.stoakes@oracle.com, luto@kernel.org,
markhemm@googlemail.com, maz@kernel.org, mhiramat@kernel.org,
mgorman@suse.de, mhocko@suse.com, mingo@redhat.com,
muchun.song@linux.dev, neilb@suse.de, osalvador@suse.de,
pcc@google.com, peterz@infradead.org, pfalcato@suse.de,
rostedt@goodmis.org, rppt@kernel.org, shakeel.butt@linux.dev,
surenb@google.com, tglx@linutronix.de, vasily.averin@linux.dev,
vbabka@suse.cz, vincent.guittot@linaro.org,
viro@zeniv.linux.org.uk, vschneid@redhat.com,
willy@infradead.org, x86@kernel.org, xhao@linux.alibaba.com,
linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-arch@vger.kernel.org
Subject: [PATCH v3 13/22] mm/mshare: prepare for page table sharing support
Date: Tue, 19 Aug 2025 18:04:06 -0700
Message-ID: <20250820010415.699353-14-anthony.yznaga@oracle.com>
In-Reply-To: <20250820010415.699353-1-anthony.yznaga@oracle.com>

From: Khalid Aziz <khalid@kernel.org>

In preparation for enabling the handling of page faults in an mshare
region, provide a way to link an mshare shared page table into a
process page table and, when the tables are already linked, to find
the actual vma in order to handle a page fault. Implement an
unmap_page_range vm_ops function for msharefs VMAs to unlink shared
page tables when a process exits or when an mshare region is
explicitly unmapped.
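
A later patch in this series wires find_shared_vma() into the arch
fault path; until then, handle_mm_fault() only warns and returns
VM_FAULT_SIGBUS for an mshare vma. Roughly, the intended call
sequence looks like this (a sketch, not the final call site):

	if (unlikely(vma_is_mshare(vma))) {
		fault = find_shared_vma(&vma, &address);
		if (fault)
			return fault;	/* e.g. VM_FAULT_NOPAGE: p4d linked */
		if (!vma)
			return VM_FAULT_SIGBUS;	/* no host vma at address */
		/*
		 * Otherwise vma and address now refer to the host mm and
		 * the host mmap lock is held for read; handle the fault
		 * against the host vma and drop the lock afterwards.
		 */
	}
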
Signed-off-by: Khalid Aziz <khalid@kernel.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
---
 include/linux/mm.h |   6 +++
 mm/memory.c        |   6 +++
 mm/mshare.c        | 112 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 124 insertions(+)

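Note: the unshare callback relies on the unmap_page_range vm_ops hook
introduced in patch 11 of this series. Roughly, the generic unmap path
dispatches it as follows (approximate shape; see that patch for the
real code):

	if (unlikely(vma->vm_ops && vma->vm_ops->unmap_page_range)) {
		vma->vm_ops->unmap_page_range(tlb, vma, addr, end, details);
		return;
	}
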
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c8dfa5c6e7d4..3a8dddb5925a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1009,11 +1009,17 @@ static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false;
 int vma_is_stack_for_current(struct vm_area_struct *vma);
 
 #ifdef CONFIG_MSHARE
+vm_fault_t find_shared_vma(struct vm_area_struct **vma, unsigned long *addrp);
 static inline bool vma_is_mshare(const struct vm_area_struct *vma)
 {
 	return vma->vm_flags & VM_MSHARE;
 }
 #else
+static inline vm_fault_t find_shared_vma(struct vm_area_struct **vma, unsigned long *addrp)
+{
+	WARN_ON_ONCE(1);
+	return VM_FAULT_SIGBUS;
+}
 static inline bool vma_is_mshare(const struct vm_area_struct *vma)
 {
 	return false;
diff --git a/mm/memory.c b/mm/memory.c
index 4e3bb49b95e2..177eb53475cb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6475,6 +6475,12 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 	if (ret)
 		goto out;
 
+	if (unlikely(vma_is_mshare(vma))) {
+		WARN_ON_ONCE(1);
+		ret = VM_FAULT_SIGBUS;
+		goto out;
+	}
+
 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
 				       flags & FAULT_FLAG_INSTRUCTION,
 				       flags & FAULT_FLAG_REMOTE)) {
diff --git a/mm/mshare.c b/mm/mshare.c
index be7cae739225..f7b7904f0405 100644
--- a/mm/mshare.c
+++ b/mm/mshare.c
@@ -21,6 +21,8 @@
 #include <linux/falloc.h>
 
 #include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
 const unsigned long mshare_align = P4D_SIZE;
 const unsigned long mshare_base = mshare_align;
 
@@ -50,6 +52,71 @@ static const struct mmu_notifier_ops mshare_mmu_ops = {
 	.arch_invalidate_secondary_tlbs = mshare_invalidate_tlbs,
 };
 
+static p4d_t *walk_to_p4d(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd;
+	p4d_t *p4d;
+
+	pgd = pgd_offset(mm, addr);
+	p4d = p4d_alloc(mm, pgd, addr);
+	if (!p4d)
+		return NULL;
+
+	return p4d;
+}
+
+/* If 0 is returned with *vmap set, the host mmap lock is held for read. */
+vm_fault_t
+find_shared_vma(struct vm_area_struct **vmap, unsigned long *addrp)
+{
+	struct vm_area_struct *vma, *guest = *vmap;
+	struct mshare_data *m_data = guest->vm_private_data;
+	struct mm_struct *host_mm = m_data->mm;
+	unsigned long host_addr;
+	p4d_t *p4d, *guest_p4d;
+
+	mmap_read_lock_nested(host_mm, SINGLE_DEPTH_NESTING);
+	host_addr = *addrp - guest->vm_start + host_mm->mmap_base;
+	p4d = walk_to_p4d(host_mm, host_addr);
+	guest_p4d = walk_to_p4d(guest->vm_mm, *addrp);
+	if (!p4d || !guest_p4d) {
+		mmap_read_unlock(host_mm);
+		return VM_FAULT_OOM;
+	}
+
+	if (!p4d_same(*guest_p4d, *p4d)) {
+		spinlock_t *guest_ptl = &guest->vm_mm->page_table_lock;
+
+		spin_lock(guest_ptl);
+		if (!p4d_same(*guest_p4d, *p4d)) {
+			pud_t *pud = p4d_pgtable(*p4d);
+
+			ptdesc_pud_pts_inc(virt_to_ptdesc(pud));
+			set_p4d(guest_p4d, *p4d);
+			spin_unlock(guest_ptl);
+			mmap_read_unlock(host_mm);
+			return VM_FAULT_NOPAGE;
+		}
+		spin_unlock(guest_ptl);
+	}
+
+	*addrp = host_addr;
+	vma = find_vma(host_mm, host_addr);
+
+	/* XXX: expand stack? */
+	if (vma && vma->vm_start > host_addr)
+		vma = NULL;
+
+	*vmap = vma;
+
+	/*
+	 * Release the host mm lock unless a matching vma was found.
+	 */
+	if (!vma)
+		mmap_read_unlock(host_mm);
+	return 0;
+}
+
 static int mshare_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
 {
 	return -EINVAL;
@@ -61,9 +128,54 @@ static int mshare_vm_op_mprotect(struct vm_area_struct *vma, unsigned long start
 	return -EINVAL;
 }
 
+/*
+ * Unlink any shared page tables in the range and ensure TLBs are flushed.
+ * Pages in the mshare region itself are not unmapped.
+ */
+static void mshare_vm_op_unshare_page_range(struct mmu_gather *tlb,
+					    struct vm_area_struct *vma,
+					    unsigned long addr, unsigned long end,
+					    struct zap_details *details)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	spinlock_t *ptl = &mm->page_table_lock;
+	unsigned long sz = mshare_align;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+
+	WARN_ON(!vma_is_mshare(vma));
+
+	tlb_start_vma(tlb, vma);
+
+	for (; addr < end; addr += sz) {
+		spin_lock(ptl);
+
+		pgd = pgd_offset(mm, addr);
+		if (!pgd_present(*pgd)) {
+			spin_unlock(ptl);
+			continue;
+		}
+		p4d = p4d_offset(pgd, addr);
+		if (!p4d_present(*p4d)) {
+			spin_unlock(ptl);
+			continue;
+		}
+		pud = p4d_pgtable(*p4d);
+		ptdesc_pud_pts_dec(virt_to_ptdesc(pud));
+
+		p4d_clear(p4d);
+		spin_unlock(ptl);
+		tlb_flush_p4d_range(tlb, addr, sz);
+	}
+
+	tlb_end_vma(tlb, vma);
+}
+
 static const struct vm_operations_struct msharefs_vm_ops = {
 	.may_split = mshare_vm_op_split,
 	.mprotect = mshare_vm_op_mprotect,
+	.unmap_page_range = mshare_vm_op_unshare_page_range,
 };
 
 /*
--
2.47.1