From: Shivank Garg <shivankg@amd.com>
To: <akpm@linux-foundation.org>, <willy@infradead.org>,
<pbonzini@redhat.com>
Cc: <linux-fsdevel@vger.kernel.org>, <linux-mm@kvack.org>,
<linux-kernel@vger.kernel.org>, <kvm@vger.kernel.org>,
<linux-coco@lists.linux.dev>, <chao.gao@intel.com>,
<seanjc@google.com>, <ackerleytng@google.com>, <david@redhat.com>,
<vbabka@suse.cz>, <bharata@amd.com>, <nikunj@amd.com>,
<michael.day@amd.com>, <Neeraj.Upadhyay@amd.com>,
<thomas.lendacky@amd.com>, <michael.roth@amd.com>,
<shivankg@amd.com>, <tabba@google.com>
Subject: [PATCH v6 4/5] KVM: guest_memfd: Enforce NUMA mempolicy using shared policy
Date: Wed, 26 Feb 2025 08:25:48 +0000
Message-ID: <20250226082549.6034-5-shivankg@amd.com>
In-Reply-To: <20250226082549.6034-1-shivankg@amd.com>

Previously, guest-memfd allocations were served from the local NUMA node
in the absence of a process mempolicy, resulting in arbitrary memory
placement. Moreover, mbind() could not be used, since the memory was not
mapped into the VMM's userspace address space.

Enable NUMA policy support by implementing vm_ops for the guest-memfd
mmap operation. This allows the VMM to map the memory and use mbind() to
set the desired NUMA policy. The policy is then retrieved via
mpol_shared_policy_lookup() and passed to filemap_grab_folio_mpol() to
ensure that allocations follow the specified memory policy.

This enables the VMM to control guest memory NUMA placement by calling
mbind() on the mapped memory regions (see the sketch below), providing
fine-grained control over guest memory allocation across NUMA nodes.

A policy change only affects future allocations and does not migrate
existing memory. This matches mbind(2)'s default behavior, which affects
only new allocations unless overridden with the
MPOL_MF_MOVE/MPOL_MF_MOVE_ALL flags; those flags are not supported for
guest_memfd, as its memory is unmovable.
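
For illustration, a minimal userspace sketch of the intended flow (error
handling omitted; the guest_memfd creation uses the existing
KVM_CREATE_GUEST_MEMFD ioctl, while the mmap() and mbind() calls rely on
the mmap support added by this patch; binding to node 1 is an arbitrary
example):

	#include <numaif.h>		/* mbind(); link with -lnuma */
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/kvm.h>

	/* Bind future allocations of a new guest_memfd to NUMA node 1. */
	static void *gmem_create_on_node1(int vm_fd, size_t size)
	{
		struct kvm_create_guest_memfd args = { .size = size };
		int fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &args);
		/*
		 * The mapping exists to carry the shared policy; this
		 * series adds no fault handler, so the VMA is not meant
		 * for direct access by the VMM.
		 */
		void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
				  MAP_SHARED, fd, 0);
		unsigned long nodemask = 1UL << 1;	/* node 1 only */

		mbind(addr, size, MPOL_BIND, &nodemask,
		      sizeof(nodemask) * 8, 0);
		return addr;
	}

The policy set this way can be read back via get_mempolicy(2) with
MPOL_F_ADDR, which goes through the new get_policy hook.
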
Suggested-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>
---
virt/kvm/guest_memfd.c | 76 +++++++++++++++++++++++++++++++++++++++++-
1 file changed, 75 insertions(+), 1 deletion(-)

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index f18176976ae3..b3a8819117a0 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -2,6 +2,7 @@
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/kvm_host.h>
+#include <linux/mempolicy.h>
#include <linux/pagemap.h>
#include <linux/anon_inodes.h>

@@ -11,8 +12,12 @@ struct kvm_gmem {
struct kvm *kvm;
struct xarray bindings;
struct list_head entry;
+ struct shared_policy policy;
};

+static struct mempolicy *kvm_gmem_get_pgoff_policy(struct kvm_gmem *gmem,
+ pgoff_t index);
+
/**
* folio_file_pfn - like folio_file_page, but return a pfn.
* @folio: The folio which contains this index.
@@ -99,7 +104,25 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
static struct folio *kvm_gmem_get_folio(struct file *file, pgoff_t index)
{
/* TODO: Support huge pages. */
- return filemap_grab_folio(file_inode(file)->i_mapping, index);
+ struct kvm_gmem *gmem = file->private_data;
+ struct inode *inode = file_inode(file);
+ struct mempolicy *policy;
+ struct folio *folio;
+
+ /*
+ * Fast-path: See if folio is already present in mapping to avoid
+ * policy_lookup.
+ */
+ folio = __filemap_get_folio(inode->i_mapping, index,
+ FGP_LOCK | FGP_ACCESSED, 0);
+ if (!IS_ERR(folio))
+ return folio;
+
+ policy = kvm_gmem_get_pgoff_policy(gmem, index);
+ folio = filemap_grab_folio_mpol(inode->i_mapping, index, policy);
+ mpol_cond_put(policy);
+
+ return folio;
}

static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
@@ -291,6 +314,7 @@ static int kvm_gmem_release(struct inode *inode, struct file *file)
mutex_unlock(&kvm->slots_lock);

xa_destroy(&gmem->bindings);
+ mpol_free_shared_policy(&gmem->policy);
kfree(gmem);

kvm_put_kvm(kvm);
@@ -312,8 +336,57 @@ static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn)
{
return gfn - slot->base_gfn + slot->gmem.pgoff;
}

+#ifdef CONFIG_NUMA
+static int kvm_gmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
+{
+ struct file *file = vma->vm_file;
+ struct kvm_gmem *gmem = file->private_data;
+
+ return mpol_set_shared_policy(&gmem->policy, vma, new);
+}
+
+static struct mempolicy *kvm_gmem_get_policy(struct vm_area_struct *vma,
+ unsigned long addr, pgoff_t *pgoff)
+{
+ struct file *file = vma->vm_file;
+ struct kvm_gmem *gmem = file->private_data;
+
+ *pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
+ return mpol_shared_policy_lookup(&gmem->policy, *pgoff);
+}
+
+static struct mempolicy *kvm_gmem_get_pgoff_policy(struct kvm_gmem *gmem,
+ pgoff_t index)
+{
+ struct mempolicy *mpol;
+
+ mpol = mpol_shared_policy_lookup(&gmem->policy, index);
+ return mpol ? mpol : get_task_policy(current);
+}
+#else
+static struct mempolicy *kvm_gmem_get_pgoff_policy(struct kvm_gmem *gmem,
+ pgoff_t index)
+{
+ return NULL;
+}
+#endif /* CONFIG_NUMA */
+
+static const struct vm_operations_struct kvm_gmem_vm_ops = {
+#ifdef CONFIG_NUMA
+ .get_policy = kvm_gmem_get_policy,
+ .set_policy = kvm_gmem_set_policy,
+#endif
+};
+
+static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ file_accessed(file);
+ vma->vm_ops = &kvm_gmem_vm_ops;
+ return 0;
+}
+
static struct file_operations kvm_gmem_fops = {
+ .mmap = kvm_gmem_mmap,
.open = generic_file_open,
.release = kvm_gmem_release,
.fallocate = kvm_gmem_fallocate,
@@ -446,6 +519,7 @@ static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
kvm_get_kvm(kvm);
gmem->kvm = kvm;
xa_init(&gmem->bindings);
+ mpol_shared_policy_init(&gmem->policy, NULL);
list_add(&gmem->entry, &inode->i_mapping->i_private_list);

fd_install(fd, file);
--
2.34.1