From: Anthony Yznaga <anthony.yznaga@oracle.com>
To: akpm@linux-foundation.org, willy@infradead.org,
markhemm@googlemail.com, viro@zeniv.linux.org.uk,
david@redhat.com, khalid@kernel.org
Cc: anthony.yznaga@oracle.com, jthoughton@google.com, corbet@lwn.net,
dave.hansen@intel.com, kirill@shutemov.name, luto@kernel.org,
brauner@kernel.org, arnd@arndb.de, ebiederm@xmission.com,
catalin.marinas@arm.com, mingo@redhat.com, peterz@infradead.org,
liam.howlett@oracle.com, lorenzo.stoakes@oracle.com,
vbabka@suse.cz, jannh@google.com, hannes@cmpxchg.org,
mhocko@kernel.org, roman.gushchin@linux.dev,
shakeel.butt@linux.dev, muchun.song@linux.dev,
tglx@linutronix.de, cgroups@vger.kernel.org, x86@kernel.org,
linux-doc@vger.kernel.org, linux-arch@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
mhiramat@kernel.org, rostedt@goodmis.org,
vasily.averin@linux.dev, xhao@linux.alibaba.com, pcc@google.com,
neilb@suse.de, maz@kernel.org
Subject: [PATCH 13/20] x86/mm: enable page table sharing
Date: Fri, 24 Jan 2025 15:54:47 -0800 [thread overview]
Message-ID: <20250124235454.84587-14-anthony.yznaga@oracle.com> (raw)
In-Reply-To: <20250124235454.84587-1-anthony.yznaga@oracle.com>
Enable x86 support for handling page faults in an mshare region by
redirecting page faults to operate on the mshare mm_struct and vmas
contained in it.
Some permission checks are done using vma flags in architecture-specific
fault handling code so the actual vma needed to complete the handling
is acquired before calling handle_mm_fault(). Because of this, an
ARCH_SUPPORTS_MSHARE config option is added.
Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
---
arch/Kconfig | 3 +++
arch/x86/Kconfig | 1 +
arch/x86/mm/fault.c | 37 ++++++++++++++++++++++++++++++++++++-
mm/Kconfig | 2 +-
4 files changed, 41 insertions(+), 2 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 6682b2a53e34..32474cdcb882 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1640,6 +1640,9 @@ config HAVE_ARCH_PFN_VALID
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
bool
+config ARCH_SUPPORTS_MSHARE
+ bool
+
config ARCH_SUPPORTS_PAGE_TABLE_CHECK
bool
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2e1a3e4386de..453a39098dfa 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -120,6 +120,7 @@ config X86
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ select ARCH_SUPPORTS_MSHARE if X86_64
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if X86_64
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e6c469b323cc..4b55ade61a01 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1217,6 +1217,8 @@ void do_user_addr_fault(struct pt_regs *regs,
struct mm_struct *mm;
vm_fault_t fault;
unsigned int flags = FAULT_FLAG_DEFAULT;
+ bool is_shared_vma;
+ unsigned long addr;
tsk = current;
mm = tsk->mm;
@@ -1330,6 +1332,12 @@ void do_user_addr_fault(struct pt_regs *regs,
if (!vma)
goto lock_mmap;
+ /* mshare does not support per-VMA locks yet */
+ if (vma_is_mshare(vma)) {
+ vma_end_read(vma);
+ goto lock_mmap;
+ }
+
if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address, NULL, vma);
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
@@ -1358,17 +1366,38 @@ void do_user_addr_fault(struct pt_regs *regs,
lock_mmap:
retry:
+ addr = address;
+ is_shared_vma = false;
vma = lock_mm_and_find_vma(mm, address, regs);
if (unlikely(!vma)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
+ if (unlikely(vma_is_mshare(vma))) {
+ fault = find_shared_vma(&vma, &addr);
+
+ if (fault) {
+ mmap_read_unlock(mm);
+ goto done;
+ }
+
+ if (!vma) {
+ mmap_read_unlock(mm);
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
+ }
+
+ is_shared_vma = true;
+ }
+
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
if (unlikely(access_error(error_code, vma))) {
+ if (unlikely(is_shared_vma))
+ mmap_read_unlock(vma->vm_mm);
bad_area_access_error(regs, error_code, address, mm, vma);
return;
}
@@ -1386,7 +1415,11 @@ void do_user_addr_fault(struct pt_regs *regs,
* userland). The return to userland is identified whenever
* FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
*/
- fault = handle_mm_fault(vma, address, flags, regs);
+ fault = handle_mm_fault(vma, addr, flags, regs);
+
+ if (unlikely(is_shared_vma) && ((fault & VM_FAULT_COMPLETED) ||
+ (fault & VM_FAULT_RETRY) || fault_signal_pending(fault, regs)))
+ mmap_read_unlock(mm);
if (fault_signal_pending(fault, regs)) {
/*
@@ -1414,6 +1447,8 @@ void do_user_addr_fault(struct pt_regs *regs,
goto retry;
}
+ if (unlikely(is_shared_vma))
+ mmap_read_unlock(vma->vm_mm);
mmap_read_unlock(mm);
done:
if (likely(!(fault & VM_FAULT_ERROR)))
diff --git a/mm/Kconfig b/mm/Kconfig
index ba3dbe31f86a..4fc056bb5643 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1360,7 +1360,7 @@ config PT_RECLAIM
config MSHARE
bool "Mshare"
- depends on MMU
+ depends on MMU && ARCH_SUPPORTS_MSHARE
help
Enable msharefs: A ram-based filesystem that allows multiple
processes to share page table entries for shared pages. A file
--
2.43.5
next prev parent reply other threads:[~2025-01-24 23:56 UTC|newest]
Thread overview: 37+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-01-24 23:54 [PATCH 00/20] Add support for shared PTEs across processes Anthony Yznaga
2025-01-24 23:54 ` [PATCH 01/20] mm: Add msharefs filesystem Anthony Yznaga
2025-01-25 3:13 ` Randy Dunlap
2025-01-25 20:05 ` Anthony Yznaga
2025-01-25 21:10 ` Matthew Wilcox
2025-01-27 17:01 ` Anthony Yznaga
2025-02-04 1:52 ` Bagas Sanjaya
2025-02-04 16:41 ` Anthony Yznaga
2025-01-24 23:54 ` [PATCH 02/20] mm/mshare: pre-populate msharefs with information file Anthony Yznaga
2025-01-24 23:54 ` [PATCH 03/20] mm/mshare: make msharefs writable and support directories Anthony Yznaga
2025-01-24 23:54 ` [PATCH 04/20] mm/mshare: allocate an mm_struct for msharefs files Anthony Yznaga
2025-01-24 23:54 ` [PATCH 05/20] mm/mshare: Add ioctl support Anthony Yznaga
2025-01-24 23:54 ` [PATCH 06/20] mm/mshare: Add a vma flag to indicate an mshare region Anthony Yznaga
2025-01-24 23:54 ` [PATCH 07/20] mm/mshare: Add mmap support Anthony Yznaga
2025-01-24 23:54 ` [PATCH 08/20] mm/mshare: flush all TLBs when updating PTEs in an mshare range Anthony Yznaga
2025-01-24 23:54 ` [PATCH 09/20] sched/numa: do not scan msharefs vmas Anthony Yznaga
2025-01-24 23:54 ` [PATCH 10/20] mm: add mmap_read_lock_killable_nested() Anthony Yznaga
2025-01-24 23:54 ` [PATCH 11/20] mm: add and use unmap_page_range vm_ops hook Anthony Yznaga
2025-01-24 23:54 ` [PATCH 12/20] mm/mshare: prepare for page table sharing support Anthony Yznaga
2025-01-24 23:54 ` Anthony Yznaga [this message]
2025-01-24 23:54 ` [PATCH 14/20] mm: create __do_mmap() to take an mm_struct * arg Anthony Yznaga
2025-01-24 23:54 ` [PATCH 15/20] mm: pass the mm in vma_munmap_struct Anthony Yznaga
2025-01-24 23:54 ` [PATCH 16/20] mshare: add MSHAREFS_CREATE_MAPPING Anthony Yznaga
2025-01-24 23:54 ` [PATCH 17/20] mshare: add MSHAREFS_UNMAP Anthony Yznaga
2025-01-24 23:54 ` [PATCH 18/20] mm/mshare: provide a way to identify an mm as an mshare host mm Anthony Yznaga
2025-01-24 23:54 ` [PATCH 19/20] mm/mshare: get memcg from current->mm instead of mshare mm Anthony Yznaga
2025-01-24 23:54 ` [PATCH 20/20] mm/mshare: associate a mem cgroup with an mshare file Anthony Yznaga
2025-01-27 22:33 ` [PATCH 00/20] Add support for shared PTEs across processes Andrew Morton
2025-01-27 23:59 ` Anthony Yznaga
2025-01-28 9:21 ` David Hildenbrand
2025-01-28 7:11 ` Bagas Sanjaya
2025-01-28 19:53 ` Anthony Yznaga
2025-01-28 9:36 ` David Hildenbrand
2025-01-28 19:40 ` Anthony Yznaga
2025-01-29 0:11 ` Andrew Morton
2025-01-29 0:25 ` Anthony Yznaga
2025-01-29 0:59 ` Matthew Wilcox
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250124235454.84587-14-anthony.yznaga@oracle.com \
--to=anthony.yznaga@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=arnd@arndb.de \
--cc=brauner@kernel.org \
--cc=catalin.marinas@arm.com \
--cc=cgroups@vger.kernel.org \
--cc=corbet@lwn.net \
--cc=dave.hansen@intel.com \
--cc=david@redhat.com \
--cc=ebiederm@xmission.com \
--cc=hannes@cmpxchg.org \
--cc=jannh@google.com \
--cc=jthoughton@google.com \
--cc=khalid@kernel.org \
--cc=kirill@shutemov.name \
--cc=liam.howlett@oracle.com \
--cc=linux-arch@vger.kernel.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=luto@kernel.org \
--cc=markhemm@googlemail.com \
--cc=maz@kernel.org \
--cc=mhiramat@kernel.org \
--cc=mhocko@kernel.org \
--cc=mingo@redhat.com \
--cc=muchun.song@linux.dev \
--cc=neilb@suse.de \
--cc=pcc@google.com \
--cc=peterz@infradead.org \
--cc=roman.gushchin@linux.dev \
--cc=rostedt@goodmis.org \
--cc=shakeel.butt@linux.dev \
--cc=tglx@linutronix.de \
--cc=vasily.averin@linux.dev \
--cc=vbabka@suse.cz \
--cc=viro@zeniv.linux.org.uk \
--cc=willy@infradead.org \
--cc=x86@kernel.org \
--cc=xhao@linux.alibaba.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox