From: Mike Rapoport <rppt@kernel.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>,
Axel Rasmussen <axelrasmussen@google.com>,
Baolin Wang <baolin.wang@linux.alibaba.com>,
David Hildenbrand <david@kernel.org>,
Hugh Dickins <hughd@google.com>,
James Houghton <jthoughton@google.com>,
"Liam R. Howlett" <Liam.Howlett@oracle.com>,
Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
"Matthew Wilcox (Oracle)" <willy@infradead.org>,
Michal Hocko <mhocko@suse.com>, Mike Rapoport <rppt@kernel.org>,
Muchun Song <muchun.song@linux.dev>,
Nikita Kalyazin <kalyazin@amazon.com>,
Oscar Salvador <osalvador@suse.de>,
Paolo Bonzini <pbonzini@redhat.com>, Peter Xu <peterx@redhat.com>,
Sean Christopherson <seanjc@google.com>,
Shuah Khan <shuah@kernel.org>,
Suren Baghdasaryan <surenb@google.com>,
Vlastimil Babka <vbabka@suse.cz>,
kvm@vger.kernel.org, linux-fsdevel@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-kselftest@vger.kernel.org,
linux-mm@kvack.org
Subject: [PATCH v2 13/15] KVM: guest_memfd: implement userfaultfd operations
Date: Fri, 6 Mar 2026 19:18:13 +0200 [thread overview]
Message-ID: <20260306171815.3160826-14-rppt@kernel.org> (raw)
In-Reply-To: <20260306171815.3160826-1-rppt@kernel.org>
From: Nikita Kalyazin <kalyazin@amazon.com>
userfaultfd notifications about page faults are used for live migration
and snapshotting of VMs.
MISSING mode allows post-copy live migration, and MINOR mode allows an
optimization of post-copy live migration for VMs backed by shared
hugetlbfs or tmpfs mappings, as described in detail in commit
7677f7fd8be7 ("userfaultfd: add minor fault registration mode").
To use the same mechanisms for VMs that use guest_memfd to map their
memory, guest_memfd should support userfaultfd operations.
Add implementation of vm_uffd_ops to guest_memfd.
Signed-off-by: Nikita Kalyazin <kalyazin@amazon.com>
Co-developed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
---
mm/filemap.c | 1 +
virt/kvm/guest_memfd.c | 84 +++++++++++++++++++++++++++++++++++++++++-
2 files changed, 83 insertions(+), 2 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 6cd7974d4ada..19dfcebcd23f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -262,6 +262,7 @@ void filemap_remove_folio(struct folio *folio)
filemap_free_folio(mapping, folio);
}
+EXPORT_SYMBOL_FOR_MODULES(filemap_remove_folio, "kvm");
/*
* page_cache_delete_batch - delete several folios from page cache
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 017d84a7adf3..46582feeed75 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -7,6 +7,7 @@
#include <linux/mempolicy.h>
#include <linux/pseudo_fs.h>
#include <linux/pagemap.h>
+#include <linux/userfaultfd_k.h>
#include "kvm_mm.h"
@@ -107,6 +108,12 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
return __kvm_gmem_prepare_folio(kvm, slot, index, folio);
}
+static struct folio *kvm_gmem_get_folio_noalloc(struct inode *inode, pgoff_t pgoff)
+{
+ return __filemap_get_folio(inode->i_mapping, pgoff,
+ FGP_LOCK | FGP_ACCESSED, 0);
+}
+
/*
* Returns a locked folio on success. The caller is responsible for
* setting the up-to-date flag before the memory is mapped into the guest.
@@ -126,8 +133,7 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
* Fast-path: See if folio is already present in mapping to avoid
* policy_lookup.
*/
- folio = __filemap_get_folio(inode->i_mapping, index,
- FGP_LOCK | FGP_ACCESSED, 0);
+ folio = kvm_gmem_get_folio_noalloc(inode, index);
if (!IS_ERR(folio))
return folio;
@@ -457,12 +463,86 @@ static struct mempolicy *kvm_gmem_get_policy(struct vm_area_struct *vma,
}
#endif /* CONFIG_NUMA */
+#ifdef CONFIG_USERFAULTFD
+static bool kvm_gmem_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
+{
+ struct inode *inode = file_inode(vma->vm_file);
+
+ /*
+ * Only support userfaultfd for guest_memfd with INIT_SHARED flag.
+ * This ensures the memory can be mapped to userspace.
+ */
+ if (!(GMEM_I(inode)->flags & GUEST_MEMFD_FLAG_INIT_SHARED))
+ return false;
+
+ return true;
+}
+
+static struct folio *kvm_gmem_folio_alloc(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct inode *inode = file_inode(vma->vm_file);
+ pgoff_t pgoff = linear_page_index(vma, addr);
+ struct mempolicy *mpol;
+ struct folio *folio;
+ gfp_t gfp;
+
+ if (unlikely(pgoff >= (i_size_read(inode) >> PAGE_SHIFT)))
+ return NULL;
+
+ gfp = mapping_gfp_mask(inode->i_mapping);
+ mpol = mpol_shared_policy_lookup(&GMEM_I(inode)->policy, pgoff);
+ mpol = mpol ?: get_task_policy(current);
+ folio = filemap_alloc_folio(gfp, 0, mpol);
+ mpol_cond_put(mpol);
+
+ return folio;
+}
+
+static int kvm_gmem_filemap_add(struct folio *folio,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct inode *inode = file_inode(vma->vm_file);
+ struct address_space *mapping = inode->i_mapping;
+ pgoff_t pgoff = linear_page_index(vma, addr);
+ int err;
+
+ __folio_set_locked(folio);
+ err = filemap_add_folio(mapping, folio, pgoff, GFP_KERNEL);
+ if (err) {
+ folio_unlock(folio);
+ return err;
+ }
+
+ return 0;
+}
+
+static void kvm_gmem_filemap_remove(struct folio *folio,
+ struct vm_area_struct *vma)
+{
+ filemap_remove_folio(folio);
+ folio_unlock(folio);
+}
+
+static const struct vm_uffd_ops kvm_gmem_uffd_ops = {
+ .can_userfault = kvm_gmem_can_userfault,
+ .get_folio_noalloc = kvm_gmem_get_folio_noalloc,
+ .alloc_folio = kvm_gmem_folio_alloc,
+ .filemap_add = kvm_gmem_filemap_add,
+ .filemap_remove = kvm_gmem_filemap_remove,
+};
+#endif /* CONFIG_USERFAULTFD */
+
static const struct vm_operations_struct kvm_gmem_vm_ops = {
.fault = kvm_gmem_fault_user_mapping,
#ifdef CONFIG_NUMA
.get_policy = kvm_gmem_get_policy,
.set_policy = kvm_gmem_set_policy,
#endif
+#ifdef CONFIG_USERFAULTFD
+ .uffd_ops = &kvm_gmem_uffd_ops,
+#endif
};
static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
--
2.51.0
next prev parent reply other threads:[~2026-03-06 17:19 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-06 17:18 [PATCH v2 00/15] mm, kvm: allow uffd support in guest_memfd Mike Rapoport
2026-03-06 17:18 ` [PATCH v2 01/15] userfaultfd: introduce mfill_copy_folio_locked() helper Mike Rapoport
2026-03-06 17:18 ` [PATCH v2 02/15] userfaultfd: introduce struct mfill_state Mike Rapoport
2026-03-06 17:18 ` [PATCH v2 03/15] userfaultfd: introduce mfill_get_pmd() helper Mike Rapoport
2026-03-06 17:18 ` [PATCH v2 04/15] userfaultfd: introduce mfill_get_vma() and mfill_put_vma() Mike Rapoport
2026-03-06 17:18 ` [PATCH v2 05/15] userfaultfd: retry copying with locks dropped in mfill_atomic_pte_copy() Mike Rapoport
2026-03-06 17:18 ` [PATCH v2 06/15] userfaultfd: move vma_can_userfault out of line Mike Rapoport
2026-03-06 17:18 ` [PATCH v2 07/15] userfaultfd: introduce vm_uffd_ops Mike Rapoport
2026-03-06 17:18 ` [PATCH v2 08/15] shmem, userfaultfd: use a VMA callback to handle UFFDIO_CONTINUE Mike Rapoport
2026-03-06 17:18 ` [PATCH v2 09/15] userfaultfd: introduce vm_uffd_ops->alloc_folio() Mike Rapoport
2026-03-06 17:18 ` [PATCH v2 10/15] shmem, userfaultfd: implement shmem uffd operations using vm_uffd_ops Mike Rapoport
2026-03-06 17:18 ` [PATCH v2 11/15] userfaultfd: mfill_atomic(): remove retry logic Mike Rapoport
2026-03-06 17:18 ` [PATCH v2 12/15] mm: generalize handling of userfaults in __do_fault() Mike Rapoport
2026-03-06 17:18 ` Mike Rapoport [this message]
2026-03-06 17:18 ` [PATCH v2 14/15] KVM: selftests: test userfaultfd minor for guest_memfd Mike Rapoport
2026-03-06 17:18 ` [PATCH v2 15/15] KVM: selftests: test userfaultfd missing " Mike Rapoport
2026-03-06 22:21 ` [PATCH v2 00/15] mm, kvm: allow uffd support in guest_memfd Andrew Morton
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260306171815.3160826-14-rppt@kernel.org \
--to=rppt@kernel.org \
--cc=Liam.Howlett@oracle.com \
--cc=aarcange@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=axelrasmussen@google.com \
--cc=baolin.wang@linux.alibaba.com \
--cc=david@kernel.org \
--cc=hughd@google.com \
--cc=jthoughton@google.com \
--cc=kalyazin@amazon.com \
--cc=kvm@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=mhocko@suse.com \
--cc=muchun.song@linux.dev \
--cc=osalvador@suse.de \
--cc=pbonzini@redhat.com \
--cc=peterx@redhat.com \
--cc=seanjc@google.com \
--cc=shuah@kernel.org \
--cc=surenb@google.com \
--cc=vbabka@suse.cz \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox