From: Shivank Garg <shivankg@amd.com>
To: <x86@kernel.org>, <viro@zeniv.linux.org.uk>, <brauner@kernel.org>,
<jack@suse.cz>, <akpm@linux-foundation.org>,
<linux-kernel@vger.kernel.org>, <linux-fsdevel@vger.kernel.org>,
<linux-mm@kvack.org>, <linux-api@vger.kernel.org>,
<linux-arch@vger.kernel.org>, <kvm@vger.kernel.org>
Cc: <chao.gao@intel.com>, <pgonda@google.com>,
<thomas.lendacky@amd.com>, <seanjc@google.com>, <luto@kernel.org>,
<tglx@linutronix.de>, <mingo@redhat.com>, <bp@alien8.de>,
<dave.hansen@linux.intel.com>, <willy@infradead.org>,
<arnd@arndb.de>, <pbonzini@redhat.com>, <kees@kernel.org>,
<shivankg@amd.com>, <bharata@amd.com>, <nikunj@amd.com>,
<michael.day@amd.com>, <Neeraj.Upadhyay@amd.com>
Subject: [RFC PATCH 4/4] KVM: guest_memfd: Enforce NUMA mempolicy if available
Date: Tue, 5 Nov 2024 16:55:15 +0000
Message-ID: <20241105165515.154941-3-shivankg@amd.com>
In-Reply-To: <20241105165515.154941-1-shivankg@amd.com>

Enforce memory policy on guest-memfd to provide proper NUMA support.

Previously, guest-memfd allocations fell back to the local NUMA node in
the absence of a process mempolicy, so guest memory could end up on an
arbitrary node. mbind() cannot be used either, because guest-memfd
memory is not mapped into userspace.

To support NUMA policies, the VMM calls the fbind() syscall to store
the mempolicy as f_policy in the guest_memfd's struct kvm_gmem. At
allocation time, f_policy is retrieved and passed to
filemap_grab_folio_mpol() so that allocations follow the specified
memory policy.
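
For illustration, a minimal VMM-side sketch of setting a policy on a
guest_memfd file descriptor. The fbind() prototype is assumed here to
mirror mbind() (fd, mode, nodemask, maxnode, flags) as proposed in
patch 2/4 of this series; the syscall number below is a placeholder,
not a real assignment.

	#define _GNU_SOURCE
	#include <linux/mempolicy.h>	/* MPOL_BIND */
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Placeholder syscall number; the real one is defined in patch 2/4. */
	#ifndef __NR_fbind
	#define __NR_fbind 462
	#endif

	/* Assumed mbind()-like prototype. */
	static long fbind(int fd, unsigned long mode,
			  const unsigned long *nodemask, unsigned long maxnode,
			  unsigned int flags)
	{
		return syscall(__NR_fbind, fd, mode, nodemask, maxnode, flags);
	}

	/* Bind all future guest_memfd allocations for gmem_fd to NUMA node 1. */
	static int bind_gmem_to_node1(int gmem_fd)
	{
		unsigned long nodemask = 1UL << 1;

		return fbind(gmem_fd, MPOL_BIND, &nodemask,
			     8 * sizeof(nodemask), 0);
	}
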
Signed-off-by: Shivank Garg <shivankg@amd.com>
---
mm/mempolicy.c | 2 ++
virt/kvm/guest_memfd.c | 49 ++++++++++++++++++++++++++++++++++++++----
2 files changed, 47 insertions(+), 4 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3a697080ecad..af2e1ef4dae7 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -347,6 +347,7 @@ void __mpol_put(struct mempolicy *pol)
return;
kmem_cache_free(policy_cache, pol);
}
+EXPORT_SYMBOL(__mpol_put);
static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
@@ -2599,6 +2600,7 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
atomic_set(&new->refcnt, 1);
return new;
}
+EXPORT_SYMBOL(__mpol_dup);
/* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 2c6fcf7c3ec9..0237bda4382c 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -4,6 +4,7 @@
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/anon_inodes.h>
+#include <linux/mempolicy.h>
#include "kvm_mm.h"
@@ -11,6 +12,7 @@ struct kvm_gmem {
struct kvm *kvm;
struct xarray bindings;
struct list_head entry;
+ struct mempolicy *f_policy;
};
/**
@@ -87,7 +89,8 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
}
static struct folio *kvm_gmem_get_huge_folio(struct inode *inode, pgoff_t index,
- unsigned int order)
+ unsigned int order,
+ struct mempolicy *policy)
{
pgoff_t npages = 1UL << order;
pgoff_t huge_index = round_down(index, npages);
@@ -104,7 +107,7 @@ static struct folio *kvm_gmem_get_huge_folio(struct inode *inode, pgoff_t index,
(loff_t)(huge_index + npages - 1) << PAGE_SHIFT))
return NULL;
- folio = filemap_alloc_folio(gfp, order);
+ folio = filemap_alloc_folio_mpol(gfp, order, policy);
if (!folio)
return NULL;
@@ -129,12 +132,26 @@ static struct folio *__kvm_gmem_get_folio(struct file *file, pgoff_t index,
bool allow_huge)
{
struct folio *folio = NULL;
+ struct kvm_gmem *gmem = file->private_data;
+ struct mempolicy *policy = NULL;
+
+ /*
+ * RCU lock is required to prevent any race condition with set_policy().
+ */
+ if (IS_ENABLED(CONFIG_NUMA)) {
+ rcu_read_lock();
+ policy = READ_ONCE(gmem->f_policy);
+ mpol_get(policy);
+ rcu_read_unlock();
+ }
if (gmem_2m_enabled && allow_huge)
- folio = kvm_gmem_get_huge_folio(file_inode(file), index, PMD_ORDER);
+ folio = kvm_gmem_get_huge_folio(file_inode(file), index, PMD_ORDER, policy);
if (!folio)
- folio = filemap_grab_folio(file_inode(file)->i_mapping, index);
+ folio = filemap_grab_folio_mpol(file_inode(file)->i_mapping, index, policy);
+
+ mpol_put(policy);
pr_debug("%s: allocate folio with PFN %lx order %d\n",
__func__, folio_pfn(folio), folio_order(folio));
@@ -338,6 +355,7 @@ static int kvm_gmem_release(struct inode *inode, struct file *file)
mutex_unlock(&kvm->slots_lock);
xa_destroy(&gmem->bindings);
+ mpol_put(gmem->f_policy);
kfree(gmem);
kvm_put_kvm(kvm);
@@ -356,10 +374,32 @@ static inline struct file *kvm_gmem_get_file(struct kvm_memory_slot *slot)
return get_file_active(&slot->gmem.file);
}
+#ifdef CONFIG_NUMA
+static int kvm_gmem_set_policy(struct file *file, struct mempolicy *mpol)
+{
+ struct mempolicy *old, *new;
+ struct kvm_gmem *gmem = file->private_data;
+
+ new = mpol_dup(mpol);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+ old = gmem->f_policy;
+ WRITE_ONCE(gmem->f_policy, new);
+ synchronize_rcu();
+ mpol_put(old);
+
+ return 0;
+}
+#endif
+
static struct file_operations kvm_gmem_fops = {
.open = generic_file_open,
.release = kvm_gmem_release,
.fallocate = kvm_gmem_fallocate,
+#ifdef CONFIG_NUMA
+ .set_policy = kvm_gmem_set_policy,
+#endif
};
void kvm_gmem_init(struct module *module)
@@ -489,6 +529,7 @@ static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
kvm_get_kvm(kvm);
gmem->kvm = kvm;
+ gmem->f_policy = NULL;
xa_init(&gmem->bindings);
list_add(&gmem->entry, &inode->i_mapping->i_private_list);
--
2.34.1
Thread overview: 12+ messages
2024-11-05 16:45 [RFC PATCH 0/4] Add fbind() and NUMA mempolicy support for KVM guest_memfd Shivank Garg
2024-11-05 16:45 ` [RFC PATCH 1/4] mm: Add mempolicy support to the filemap layer Shivank Garg
2024-11-05 16:55 ` [RFC PATCH 2/4] Introduce fbind syscall Shivank Garg
2024-11-05 16:55 ` [RFC PATCH 3/4] KVM: guest_memfd: Pass file pointer instead of inode in guest_memfd APIs Shivank Garg
2024-11-05 16:55 ` Shivank Garg [this message]
2024-11-05 18:55 ` [RFC PATCH 0/4] Add fbind() and NUMA mempolicy support for KVM guest_memfd Matthew Wilcox
2024-11-07 8:54 ` Shivank Garg
2024-11-07 15:10 ` Matthew Wilcox
2024-11-08 9:21 ` Shivank Garg
2024-11-08 17:31 ` Paolo Bonzini
2024-11-11 11:02 ` Vlastimil Babka
2024-11-11 22:14 ` David Hildenbrand