From: Ackerley Tng <ackerleytng@google.com>
To: tabba@google.com, quic_eberman@quicinc.com, roypat@amazon.co.uk,
jgg@nvidia.com, peterx@redhat.com, david@redhat.com,
rientjes@google.com, fvdl@google.com, jthoughton@google.com,
seanjc@google.com, pbonzini@redhat.com, zhiquan1.li@intel.com,
fan.du@intel.com, jun.miao@intel.com, isaku.yamahata@intel.com,
muchun.song@linux.dev, mike.kravetz@oracle.com
Cc: erdemaktas@google.com, vannapurve@google.com,
ackerleytng@google.com, qperret@google.com, jhubbard@nvidia.com,
willy@infradead.org, shuah@kernel.org, brauner@kernel.org,
bfoster@redhat.com, kent.overstreet@linux.dev, pvorel@suse.cz,
rppt@kernel.org, richard.weiyang@gmail.com, anup@brainfault.org,
haibo1.xu@intel.com, ajones@ventanamicro.com,
vkuznets@redhat.com, maciej.wieczor-retman@intel.com,
pgonda@google.com, oliver.upton@linux.dev,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
kvm@vger.kernel.org, linux-kselftest@vger.kernel.org,
linux-fsdevel@kvack.org
Subject: [RFC PATCH 25/39] KVM: guest_memfd: Split HugeTLB pages for guest_memfd use
Date: Tue, 10 Sep 2024 23:43:56 +0000
Message-ID: <c488244d78c74992ac1f07564617a4758951e596.1726009989.git.ackerleytng@google.com>
In-Reply-To: <cover.1726009989.git.ackerleytng@google.com>
From: Vishal Annapurve <vannapurve@google.com>
In this patch, newly allocated HugeTLB pages are split into 4K regular
pages before being provided to the requester (fallocate() or KVM). The
split pages are reconstructed (merged) back into HugeTLB pages before
being returned to the HugeTLB pool.
This is an intermediate step to build page splitting/merging
functionality before allowing guest_memfd files to be mmap()ed.
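
At a high level, the allocation path behaves roughly as sketched below.
This is simplified pseudo-code of kvm_gmem_hugetlb_alloc_and_cache_folio()
as changed by this patch (locking, refcounting details and error paths
omitted):

    folio = kvm_gmem_hugetlb_alloc_folio(h, spool);   /* huge folio */
    kvm_gmem_hugetlb_split_folio(h, folio);           /* stash metadata, split into 4K folios */
    for each 4K folio in the huge page range:
        kvm_gmem_hugetlb_filemap_add_folio(mapping, folio,
                                           first_subpage_index + i, gfp);
    return the 4K folio covering the requested index;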
Co-developed-by: Ackerley Tng <ackerleytng@google.com>
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
Co-developed-by: Vishal Annapurve <vannapurve@google.com>
Signed-off-by: Vishal Annapurve <vannapurve@google.com>
---
virt/kvm/guest_memfd.c | 299 ++++++++++++++++++++++++++++++++++++++---
1 file changed, 281 insertions(+), 18 deletions(-)
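
For reference, the truncation path roughly reverses the split. A simplified
pseudo-code sketch of kvm_gmem_hugetlb_filemap_remove_folios() as changed
below (fault-mutex locking and error handling omitted):

    for each 4K folio in [lstart, lend) found in the filemap:
        if folio is the first subpage of a huge page block:
            add folio to folios_to_reconstruct;
        folio_get(folio);                             /* keep subpages alive */
        remove folio from the filemap;

    for each folio in folios_to_reconstruct:
        kvm_gmem_hugetlb_reconstruct_folio(h, folio); /* unstash metadata, re-form huge folio */
        move folio back onto h->hugepage_activelist;
        folio_put(folio);                             /* huge folio returns to HugeTLB */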
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index eacbfdb950d1..8151df2c03e5 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -229,31 +229,206 @@ static int kvm_gmem_hugetlb_filemap_add_folio(struct address_space *mapping,
return 0;
}
+struct kvm_gmem_split_stash {
+ struct {
+ unsigned long _flags_2;
+ unsigned long _head_2;
+
+ void *_hugetlb_subpool;
+ void *_hugetlb_cgroup;
+ void *_hugetlb_cgroup_rsvd;
+ void *_hugetlb_hwpoison;
+ };
+ void *hugetlb_private;
+};
+
+static int kvm_gmem_hugetlb_stash_metadata(struct folio *folio)
+{
+ struct kvm_gmem_split_stash *stash;
+
+ stash = kmalloc(sizeof(*stash), GFP_KERNEL);
+ if (!stash)
+ return -ENOMEM;
+
+ stash->_flags_2 = folio->_flags_2;
+ stash->_head_2 = folio->_head_2;
+ stash->_hugetlb_subpool = folio->_hugetlb_subpool;
+ stash->_hugetlb_cgroup = folio->_hugetlb_cgroup;
+ stash->_hugetlb_cgroup_rsvd = folio->_hugetlb_cgroup_rsvd;
+ stash->_hugetlb_hwpoison = folio->_hugetlb_hwpoison;
+ stash->hugetlb_private = folio_get_private(folio);
+
+ folio_change_private(folio, (void *)stash);
+
+ return 0;
+}
+
+static int kvm_gmem_hugetlb_unstash_metadata(struct folio *folio)
+{
+ struct kvm_gmem_split_stash *stash;
+
+ stash = folio_get_private(folio);
+
+ if (!stash)
+ return -EINVAL;
+
+ folio->_flags_2 = stash->_flags_2;
+ folio->_head_2 = stash->_head_2;
+ folio->_hugetlb_subpool = stash->_hugetlb_subpool;
+ folio->_hugetlb_cgroup = stash->_hugetlb_cgroup;
+ folio->_hugetlb_cgroup_rsvd = stash->_hugetlb_cgroup_rsvd;
+ folio->_hugetlb_hwpoison = stash->_hugetlb_hwpoison;
+ folio_change_private(folio, stash->hugetlb_private);
+
+ kfree(stash);
+
+ return 0;
+}
+
+/**
+ * Reconstruct a HugeTLB folio from a contiguous block of folios where the first
+ * of the contiguous folios is @folio.
+ *
+ * The size of the contiguous block is huge_page_size(@h). All the folios in
+ * the block are checked to have a refcount of 1 before reconstruction. After
+ * reconstruction, the reconstructed folio has a refcount of 1.
+ *
+ * Return 0 on success and negative error otherwise.
+ */
+static int kvm_gmem_hugetlb_reconstruct_folio(struct hstate *h, struct folio *folio)
+{
+ int ret;
+
+ WARN_ON((folio->index & (pages_per_huge_page(h) - 1)) != 0);
+
+ ret = kvm_gmem_hugetlb_unstash_metadata(folio);
+ if (ret)
+ return ret;
+
+ if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
+ kvm_gmem_hugetlb_stash_metadata(folio);
+ return -ENOMEM;
+ }
+
+ __folio_set_hugetlb(folio);
+
+ folio_set_count(folio, 1);
+
+ hugetlb_vmemmap_optimize_folio(h, folio);
+
+ return 0;
+}
+
+/* Basically folio_set_order(folio, order) without the checks. */
+static inline void kvm_gmem_folio_set_order(struct folio *folio, unsigned int order)
+{
+ folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
+#ifdef CONFIG_64BIT
+ folio->_folio_nr_pages = 1U << order;
+#endif
+}
+
+/**
+ * Split a HugeTLB @folio of size huge_page_size(@h).
+ *
+ * After splitting, each split folio has a refcount of 1. There are no checks on
+ * refcounts before splitting.
+ *
+ * Return 0 on success and negative error otherwise.
+ */
+static int kvm_gmem_hugetlb_split_folio(struct hstate *h, struct folio *folio)
+{
+ int ret;
+
+ ret = hugetlb_vmemmap_restore_folio(h, folio);
+ if (ret)
+ return ret;
+
+ ret = kvm_gmem_hugetlb_stash_metadata(folio);
+ if (ret) {
+ hugetlb_vmemmap_optimize_folio(h, folio);
+ return ret;
+ }
+
+ kvm_gmem_folio_set_order(folio, 0);
+
+ destroy_compound_gigantic_folio(folio, huge_page_order(h));
+ __folio_clear_hugetlb(folio);
+
+ /*
+ * Remove the first folio from h->hugepage_activelist since it is no
+ * longer a HugeTLB page. The other split pages should not be on any
+ * lists.
+ */
+ hugetlb_folio_list_del(folio);
+
+ return 0;
+}
+
static struct folio *kvm_gmem_hugetlb_alloc_and_cache_folio(struct inode *inode,
pgoff_t index)
{
+ struct folio *allocated_hugetlb_folio;
+ pgoff_t hugetlb_first_subpage_index;
+ struct page *hugetlb_first_subpage;
struct kvm_gmem_hugetlb *hgmem;
- struct folio *folio;
+ struct page *requested_page;
int ret;
+ int i;
hgmem = kvm_gmem_hgmem(inode);
- folio = kvm_gmem_hugetlb_alloc_folio(hgmem->h, hgmem->spool);
- if (IS_ERR(folio))
- return folio;
+ allocated_hugetlb_folio = kvm_gmem_hugetlb_alloc_folio(hgmem->h, hgmem->spool);
+ if (IS_ERR(allocated_hugetlb_folio))
+ return allocated_hugetlb_folio;
+
+ requested_page = folio_file_page(allocated_hugetlb_folio, index);
+ hugetlb_first_subpage = folio_file_page(allocated_hugetlb_folio, 0);
+ hugetlb_first_subpage_index = index & (huge_page_mask(hgmem->h) >> PAGE_SHIFT);
- /* TODO: Fix index here to be aligned to huge page size. */
- ret = kvm_gmem_hugetlb_filemap_add_folio(
- inode->i_mapping, folio, index, htlb_alloc_mask(hgmem->h));
+ ret = kvm_gmem_hugetlb_split_folio(hgmem->h, allocated_hugetlb_folio);
if (ret) {
- folio_put(folio);
+ folio_put(allocated_hugetlb_folio);
return ERR_PTR(ret);
}
+ for (i = 0; i < pages_per_huge_page(hgmem->h); ++i) {
+ struct folio *folio = page_folio(nth_page(hugetlb_first_subpage, i));
+
+ ret = kvm_gmem_hugetlb_filemap_add_folio(inode->i_mapping,
+ folio,
+ hugetlb_first_subpage_index + i,
+ htlb_alloc_mask(hgmem->h));
+ if (ret) {
+ /* TODO: handle cleanup properly. */
+ pr_err("Handle cleanup properly index=%lx, ret=%d\n",
+ hugetlb_first_subpage_index + i, ret);
+ dump_page(nth_page(hugetlb_first_subpage, i), "check");
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * Skip unlocking for the requested index since
+ * kvm_gmem_get_folio() returns a locked folio.
+ *
+ * Do folio_put() to drop the refcount that each split folio
+ * carries from the split. Each split folio has a refcount of
+ * 1, in line with hugetlb_alloc_folio(), which returns a
+ * folio with refcount 1.
+ *
+ * Skip folio_put() for requested index since
+ * kvm_gmem_get_folio() returns a folio with refcount 1.
+ */
+ if (hugetlb_first_subpage_index + i != index) {
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+ }
+
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(hgmem->h);
spin_unlock(&inode->i_lock);
- return folio;
+ return page_folio(requested_page);
}
static struct folio *kvm_gmem_get_hugetlb_folio(struct inode *inode,
@@ -365,7 +540,9 @@ static inline void kvm_gmem_hugetlb_filemap_remove_folio(struct folio *folio)
/**
* Removes folios in range [@lstart, @lend) from page cache/filemap (@mapping),
- * returning the number of pages freed.
+ * returning the number of HugeTLB pages freed.
+ *
+ * @lend - @lstart must be a multiple of the HugeTLB page size.
*/
static int kvm_gmem_hugetlb_filemap_remove_folios(struct address_space *mapping,
struct hstate *h,
@@ -373,37 +550,69 @@ static int kvm_gmem_hugetlb_filemap_remove_folios(struct address_space *mapping,
{
const pgoff_t end = lend >> PAGE_SHIFT;
pgoff_t next = lstart >> PAGE_SHIFT;
+ LIST_HEAD(folios_to_reconstruct);
struct folio_batch fbatch;
+ struct folio *folio, *tmp;
int num_freed = 0;
+ int i;
+ /*
+ * TODO: Iterate over huge_page_size(h) blocks to avoid taking and
+ * releasing hugetlb_fault_mutex_table[hash] lock so often. When
+ * truncating, lstart and lend should be clipped to the size of this
+ * guest_memfd file, otherwise there would be too many iterations.
+ */
folio_batch_init(&fbatch);
while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
- int i;
for (i = 0; i < folio_batch_count(&fbatch); ++i) {
struct folio *folio;
pgoff_t hindex;
u32 hash;
folio = fbatch.folios[i];
+
hindex = folio->index >> huge_page_order(h);
hash = hugetlb_fault_mutex_hash(mapping, hindex);
-
mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
+ /*
+ * Collect first pages of HugeTLB folios for
+ * reconstruction later.
+ */
+ if ((folio->index & ~(huge_page_mask(h) >> PAGE_SHIFT)) == 0)
+ list_add(&folio->lru, &folios_to_reconstruct);
+
+ /*
+ * Before removing from filemap, take a reference so
+ * sub-folios don't get freed. Don't free the sub-folios
+ * until after reconstruction.
+ */
+ folio_get(folio);
+
kvm_gmem_hugetlb_filemap_remove_folio(folio);
- mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- num_freed++;
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}
folio_batch_release(&fbatch);
cond_resched();
}
+ list_for_each_entry_safe(folio, tmp, &folios_to_reconstruct, lru) {
+ kvm_gmem_hugetlb_reconstruct_folio(h, folio);
+ hugetlb_folio_list_move(folio, &h->hugepage_activelist);
+
+ folio_put(folio);
+ num_freed++;
+ }
+
return num_freed;
}
/**
* Removes folios in range [@lstart, @lend) from page cache of inode, updates
* inode metadata and hugetlb reservations.
+ *
+ * @lend - @lstart must be a multiple of the HugeTLB page size.
*/
static void kvm_gmem_hugetlb_truncate_folios_range(struct inode *inode,
loff_t lstart, loff_t lend)
@@ -427,6 +636,56 @@ static void kvm_gmem_hugetlb_truncate_folios_range(struct inode *inode,
spin_unlock(&inode->i_lock);
}
+/**
+ * Zeroes offsets [@start, @end) in a folio from @mapping.
+ *
+ * [@start, @end) must be within the same folio.
+ */
+static void kvm_gmem_zero_partial_page(
+ struct address_space *mapping, loff_t start, loff_t end)
+{
+ struct folio *folio;
+ pgoff_t idx = start >> PAGE_SHIFT;
+
+ folio = filemap_lock_folio(mapping, idx);
+ if (IS_ERR(folio))
+ return;
+
+ start = offset_in_folio(folio, start);
+ end = offset_in_folio(folio, end);
+ if (!end)
+ end = folio_size(folio);
+
+ folio_zero_segment(folio, (size_t)start, (size_t)end);
+ folio_unlock(folio);
+ folio_put(folio);
+}
+
+/**
+ * Zeroes all pages in range [@start, @end) in @mapping.
+ *
+ * hugetlb_zero_partial_page() would work if the folio were still a full
+ * HugeTLB page, but it is not suitable here since the pages have been split.
+ *
+ * truncate_inode_pages_range() isn't the right function because it removes
+ * pages from the page cache; this function only zeroes the pages.
+ */
+static void kvm_gmem_hugetlb_zero_split_pages(struct address_space *mapping,
+ loff_t start, loff_t end)
+{
+ loff_t aligned_start;
+ loff_t index;
+
+ aligned_start = round_up(start, PAGE_SIZE);
+
+ if (start < aligned_start)
+ kvm_gmem_zero_partial_page(mapping, start, min(aligned_start, end));
+
+ for (index = aligned_start; index < end; index += PAGE_SIZE) {
+ kvm_gmem_zero_partial_page(mapping, index,
+ min((loff_t)(index + PAGE_SIZE), end));
+ }
+}
+
static void kvm_gmem_hugetlb_truncate_range(struct inode *inode, loff_t lstart,
loff_t lend)
{
@@ -442,8 +701,8 @@ static void kvm_gmem_hugetlb_truncate_range(struct inode *inode, loff_t lstart,
full_hpage_end = round_down(lend, hsize);
if (lstart < full_hpage_start) {
- hugetlb_zero_partial_page(h, inode->i_mapping, lstart,
- full_hpage_start);
+ kvm_gmem_hugetlb_zero_split_pages(inode->i_mapping, lstart,
+ full_hpage_start);
}
if (full_hpage_end > full_hpage_start) {
@@ -452,8 +711,8 @@ static void kvm_gmem_hugetlb_truncate_range(struct inode *inode, loff_t lstart,
}
if (lend > full_hpage_end) {
- hugetlb_zero_partial_page(h, inode->i_mapping, full_hpage_end,
- lend);
+ kvm_gmem_hugetlb_zero_split_pages(inode->i_mapping, full_hpage_end,
+ lend);
}
}
@@ -1060,6 +1319,10 @@ __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
if (folio_test_hwpoison(folio)) {
folio_unlock(folio);
+ /*
+ * TODO: this folio may be part of a HugeTLB folio. Perhaps
+ * reconstruct and then free page?
+ */
folio_put(folio);
return ERR_PTR(-EHWPOISON);
}
--
2.46.0.598.g6f2099f65c-goog