* [PATCH RFC v2 1/6] KVM: guest_memfd: Don't set FGP_ACCESSED when getting folios
2026-02-25 7:20 [PATCH RFC v2 0/6] guest_memfd: Track amount of memory allocated on inode Ackerley Tng
@ 2026-02-25 7:20 ` Ackerley Tng
2026-02-25 7:20 ` [PATCH RFC v2 2/6] KVM: guest_memfd: Directly allocate folios with filemap_alloc_folio() Ackerley Tng
` (4 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Ackerley Tng @ 2026-02-25 7:20 UTC (permalink / raw)
To: Paolo Bonzini, Andrew Morton, David Hildenbrand, Lorenzo Stoakes,
Liam R. Howlett, Vlastimil Babka, Mike Rapoport,
Suren Baghdasaryan, Michal Hocko, Matthew Wilcox (Oracle),
Shuah Khan, Jonathan Corbet, Alexander Viro, Christian Brauner,
Jan Kara, seanjc, rientjes, rick.p.edgecombe, yan.y.zhao, fvdl,
jthoughton, vannapurve, shivankg, michael.roth, pratyush,
pasha.tatashin, kalyazin, tabba
Cc: kvm, linux-kernel, linux-mm, linux-fsdevel, linux-kselftest,
linux-doc, Ackerley Tng
guest_memfd folios don't care about accessed flags since the memory is
unevictable and there is no storage to write back to, hence, clean up the
allocation path by not setting FGP_ACCESSED.
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Hildenbrand (arm) <david@kernel.org>
---
virt/kvm/guest_memfd.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 923c51a3a5256..2df27b6443115 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -126,14 +126,13 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
* Fast-path: See if folio is already present in mapping to avoid
* policy_lookup.
*/
- folio = __filemap_get_folio(inode->i_mapping, index,
- FGP_LOCK | FGP_ACCESSED, 0);
+ folio = filemap_lock_folio(inode->i_mapping, index);
if (!IS_ERR(folio))
return folio;
policy = mpol_shared_policy_lookup(&GMEM_I(inode)->policy, index);
folio = __filemap_get_folio_mpol(inode->i_mapping, index,
- FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ FGP_LOCK | FGP_CREAT,
mapping_gfp_mask(inode->i_mapping), policy);
mpol_cond_put(policy);
--
2.53.0.414.gf7e9f6c205-goog
^ permalink raw reply [flat|nested] 7+ messages in thread* [PATCH RFC v2 2/6] KVM: guest_memfd: Directly allocate folios with filemap_alloc_folio()
2026-02-25 7:20 [PATCH RFC v2 0/6] guest_memfd: Track amount of memory allocated on inode Ackerley Tng
2026-02-25 7:20 ` [PATCH RFC v2 1/6] KVM: guest_memfd: Don't set FGP_ACCESSED when getting folios Ackerley Tng
@ 2026-02-25 7:20 ` Ackerley Tng
2026-02-25 7:20 ` [PATCH RFC v2 3/6] fs: Add .unaccount_folio callback Ackerley Tng
` (3 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Ackerley Tng @ 2026-02-25 7:20 UTC (permalink / raw)
To: Paolo Bonzini, Andrew Morton, David Hildenbrand, Lorenzo Stoakes,
Liam R. Howlett, Vlastimil Babka, Mike Rapoport,
Suren Baghdasaryan, Michal Hocko, Matthew Wilcox (Oracle),
Shuah Khan, Jonathan Corbet, Alexander Viro, Christian Brauner,
Jan Kara, seanjc, rientjes, rick.p.edgecombe, yan.y.zhao, fvdl,
jthoughton, vannapurve, shivankg, michael.roth, pratyush,
pasha.tatashin, kalyazin, tabba
Cc: kvm, linux-kernel, linux-mm, linux-fsdevel, linux-kselftest,
linux-doc, Ackerley Tng
__filemap_get_folio_mpol() is parametrized by a bunch of GFP flags, which
adds complexity for the reader. Since guest_memfd doesn't meaningfully use
any of the other FGP flags, undo that complexity by directly calling
filemap_alloc_folio().
Directly calling filemap_alloc_folio() also allows the order of 0 to be
explicitly specified, which is the only order guest_memfd supports. This is
easier to understand, and removes the chance of anything else being able to
unintentionally influence allocated folio size.
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
---
virt/kvm/guest_memfd.c | 51 +++++++++++++++++++++++++++++++++++---------------
1 file changed, 36 insertions(+), 15 deletions(-)
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 2df27b6443115..2488d7b8f2b0d 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -107,6 +107,39 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
return __kvm_gmem_prepare_folio(kvm, slot, index, folio);
}
+static struct folio *__kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
+{
+ /* TODO: Support huge pages. */
+ struct mempolicy *policy;
+ struct folio *folio;
+ gfp_t gfp;
+ int ret;
+
+ /*
+ * Fast-path: See if folio is already present in mapping to avoid
+ * policy_lookup.
+ */
+ folio = filemap_lock_folio(inode->i_mapping, index);
+ if (!IS_ERR(folio))
+ return folio;
+
+ gfp = mapping_gfp_mask(inode->i_mapping);
+
+ policy = mpol_shared_policy_lookup(&GMEM_I(inode)->policy, index);
+ folio = filemap_alloc_folio(gfp, 0, policy);
+ mpol_cond_put(policy);
+ if (!folio)
+ return ERR_PTR(-ENOMEM);
+
+ ret = filemap_add_folio(inode->i_mapping, folio, index, gfp);
+ if (ret) {
+ folio_put(folio);
+ return ERR_PTR(ret);
+ }
+
+ return folio;
+}
+
/*
* Returns a locked folio on success. The caller is responsible for
* setting the up-to-date flag before the memory is mapped into the guest.
@@ -118,23 +151,11 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
*/
static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
{
- /* TODO: Support huge pages. */
- struct mempolicy *policy;
struct folio *folio;
- /*
- * Fast-path: See if folio is already present in mapping to avoid
- * policy_lookup.
- */
- folio = filemap_lock_folio(inode->i_mapping, index);
- if (!IS_ERR(folio))
- return folio;
-
- policy = mpol_shared_policy_lookup(&GMEM_I(inode)->policy, index);
- folio = __filemap_get_folio_mpol(inode->i_mapping, index,
- FGP_LOCK | FGP_CREAT,
- mapping_gfp_mask(inode->i_mapping), policy);
- mpol_cond_put(policy);
+ do {
+ folio = __kvm_gmem_get_folio(inode, index);
+ } while (PTR_ERR(folio) == -EEXIST);
/*
* External interfaces like kvm_gmem_get_pfn() support dealing
--
2.53.0.414.gf7e9f6c205-goog
^ permalink raw reply [flat|nested] 7+ messages in thread* [PATCH RFC v2 3/6] fs: Add .unaccount_folio callback
2026-02-25 7:20 [PATCH RFC v2 0/6] guest_memfd: Track amount of memory allocated on inode Ackerley Tng
2026-02-25 7:20 ` [PATCH RFC v2 1/6] KVM: guest_memfd: Don't set FGP_ACCESSED when getting folios Ackerley Tng
2026-02-25 7:20 ` [PATCH RFC v2 2/6] KVM: guest_memfd: Directly allocate folios with filemap_alloc_folio() Ackerley Tng
@ 2026-02-25 7:20 ` Ackerley Tng
2026-02-25 7:20 ` [PATCH RFC v2 4/6] KVM: guest_memfd: Track amount of memory allocated on inode Ackerley Tng
` (2 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Ackerley Tng @ 2026-02-25 7:20 UTC (permalink / raw)
To: Paolo Bonzini, Andrew Morton, David Hildenbrand, Lorenzo Stoakes,
Liam R. Howlett, Vlastimil Babka, Mike Rapoport,
Suren Baghdasaryan, Michal Hocko, Matthew Wilcox (Oracle),
Shuah Khan, Jonathan Corbet, Alexander Viro, Christian Brauner,
Jan Kara, seanjc, rientjes, rick.p.edgecombe, yan.y.zhao, fvdl,
jthoughton, vannapurve, shivankg, michael.roth, pratyush,
pasha.tatashin, kalyazin, tabba
Cc: kvm, linux-kernel, linux-mm, linux-fsdevel, linux-kselftest,
linux-doc, Ackerley Tng
Add .unaccount_folio callback to allow filesystems to do accounting-related
updates to the inode or struct address_space mapping, when the folio is
about to be removed from the filemap/page_cache.
.free_folio cannot be used since .free_folio cannot assume that struct
address_space mapping still exists.
From the name, .invalidate_folio and .release_folio seem suitable, but
those are meant only to handle freeing of a folio's private
data. .release_folio is also not called in the truncation path.
An alternative would be to add a more general callback and call that from
filemap_remove_folio() and delete_from_page_cache_batch(). .unaccount_folio
was chosen as it is more specific to how guest_memfd will be using this
callback in later patches. Also, .unaccount_folio only needs a single call
site.
This further refactoring was considered:
if (mapping->a_ops->unaccount_folio &&
mapping->a_ops->unaccount_folio(folio))
... do generic page_cache unaccounting ...
but that was abandoned since a hugetlb folio may not have an associated
mapping.
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
---
Documentation/filesystems/vfs.rst | 8 ++++++++
include/linux/fs.h | 1 +
mm/filemap.c | 3 +++
3 files changed, 12 insertions(+)
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index 670ba66b60e49..5ed5c43d5768b 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -809,6 +809,7 @@ cache in your filesystem. The following members are defined:
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t start, size_t len);
bool (*release_folio)(struct folio *, gfp_t);
+ void (*unaccount_folio)(struct folio *folio);
void (*free_folio)(struct folio *);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
int (*migrate_folio)(struct address_space *, struct folio *dst,
@@ -967,6 +968,13 @@ cache in your filesystem. The following members are defined:
its release_folio will need to ensure this. Possibly it can
clear the uptodate flag if it cannot free private data yet.
+``unaccount_folio``
+ unaccount_folio is called under inode lock and struct
+ address_space's xa_lock, just before the folio is removed from
+ the page cache in order to allow updating any kind of
+ accounting on the inode or address_space mapping while the
+ address_space mapping still exists.
+
``free_folio``
free_folio is called once the folio is no longer visible in the
page cache in order to allow the cleanup of any private data.
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a01621fa636a6..c71f327032142 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -422,6 +422,7 @@ struct address_space_operations {
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
bool (*release_folio)(struct folio *, gfp_t);
+ void (*unaccount_folio)(struct folio *folio);
void (*free_folio)(struct folio *folio);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/*
diff --git a/mm/filemap.c b/mm/filemap.c
index ebd75684cb0a7..ff957929e6087 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -176,6 +176,9 @@ static void filemap_unaccount_folio(struct address_space *mapping,
}
}
+ if (unlikely(mapping->a_ops->unaccount_folio))
+ mapping->a_ops->unaccount_folio(folio);
+
/* hugetlb folios do not participate in page cache accounting. */
if (folio_test_hugetlb(folio))
return;
--
2.53.0.414.gf7e9f6c205-goog
^ permalink raw reply [flat|nested] 7+ messages in thread* [PATCH RFC v2 4/6] KVM: guest_memfd: Track amount of memory allocated on inode
2026-02-25 7:20 [PATCH RFC v2 0/6] guest_memfd: Track amount of memory allocated on inode Ackerley Tng
` (2 preceding siblings ...)
2026-02-25 7:20 ` [PATCH RFC v2 3/6] fs: Add .unaccount_folio callback Ackerley Tng
@ 2026-02-25 7:20 ` Ackerley Tng
2026-02-25 7:20 ` [PATCH RFC v2 5/6] KVM: selftests: Wrap fstat() to assert success Ackerley Tng
2026-02-25 7:20 ` [PATCH RFC v2 6/6] KVM: selftests: Test that st_blocks is updated on allocation Ackerley Tng
5 siblings, 0 replies; 7+ messages in thread
From: Ackerley Tng @ 2026-02-25 7:20 UTC (permalink / raw)
To: Paolo Bonzini, Andrew Morton, David Hildenbrand, Lorenzo Stoakes,
Liam R. Howlett, Vlastimil Babka, Mike Rapoport,
Suren Baghdasaryan, Michal Hocko, Matthew Wilcox (Oracle),
Shuah Khan, Jonathan Corbet, Alexander Viro, Christian Brauner,
Jan Kara, seanjc, rientjes, rick.p.edgecombe, yan.y.zhao, fvdl,
jthoughton, vannapurve, shivankg, michael.roth, pratyush,
pasha.tatashin, kalyazin, tabba
Cc: kvm, linux-kernel, linux-mm, linux-fsdevel, linux-kselftest,
linux-doc, Ackerley Tng
The guest memfd currently does not update the inode's i_blocks and i_bytes
count when memory is allocated or freed. Hence, st_blocks returned from
fstat() is always 0.
Introduce byte accounting for guest memfd inodes. When a new folio is
added to the filemap, add the folio's size. Conversely, when folios are
truncated and removed from the mapping, deduct the folio's size.
With this change, stat.st_blocks for a guest_memfd will correctly report
the number of 512-byte blocks allocated to the file, consistent with other
memory-based filesystems like tmpfs.
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
---
virt/kvm/guest_memfd.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 2488d7b8f2b0d..b31e6612d16a8 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -137,6 +137,8 @@ static struct folio *__kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
return ERR_PTR(ret);
}
+ inode_add_bytes(inode, folio_size(folio));
+
return folio;
}
@@ -553,10 +555,16 @@ static void kvm_gmem_free_folio(struct folio *folio)
}
#endif
+static void kvm_gmem_unaccount_folio(struct folio *folio)
+{
+ __inode_sub_bytes(folio_inode(folio), folio_size(folio));
+}
+
static const struct address_space_operations kvm_gmem_aops = {
.dirty_folio = noop_dirty_folio,
.migrate_folio = kvm_gmem_migrate_folio,
.error_remove_folio = kvm_gmem_error_folio,
+ .unaccount_folio = kvm_gmem_unaccount_folio,
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
.free_folio = kvm_gmem_free_folio,
#endif
--
2.53.0.414.gf7e9f6c205-goog
^ permalink raw reply [flat|nested] 7+ messages in thread* [PATCH RFC v2 5/6] KVM: selftests: Wrap fstat() to assert success
2026-02-25 7:20 [PATCH RFC v2 0/6] guest_memfd: Track amount of memory allocated on inode Ackerley Tng
` (3 preceding siblings ...)
2026-02-25 7:20 ` [PATCH RFC v2 4/6] KVM: guest_memfd: Track amount of memory allocated on inode Ackerley Tng
@ 2026-02-25 7:20 ` Ackerley Tng
2026-02-25 7:20 ` [PATCH RFC v2 6/6] KVM: selftests: Test that st_blocks is updated on allocation Ackerley Tng
5 siblings, 0 replies; 7+ messages in thread
From: Ackerley Tng @ 2026-02-25 7:20 UTC (permalink / raw)
To: Paolo Bonzini, Andrew Morton, David Hildenbrand, Lorenzo Stoakes,
Liam R. Howlett, Vlastimil Babka, Mike Rapoport,
Suren Baghdasaryan, Michal Hocko, Matthew Wilcox (Oracle),
Shuah Khan, Jonathan Corbet, Alexander Viro, Christian Brauner,
Jan Kara, seanjc, rientjes, rick.p.edgecombe, yan.y.zhao, fvdl,
jthoughton, vannapurve, shivankg, michael.roth, pratyush,
pasha.tatashin, kalyazin, tabba
Cc: kvm, linux-kernel, linux-mm, linux-fsdevel, linux-kselftest,
linux-doc, Ackerley Tng
Extend kvm_syscalls.h to wrap fstat() to assert success. This will be used
in the next patch.
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
---
tools/testing/selftests/kvm/guest_memfd_test.c | 15 +++++----------
tools/testing/selftests/kvm/include/kvm_syscalls.h | 2 ++
2 files changed, 7 insertions(+), 10 deletions(-)
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index 618c937f3c90f..81387f06e770a 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -212,10 +212,8 @@ static void test_mmap_not_supported(int fd, size_t total_size)
static void test_file_size(int fd, size_t total_size)
{
struct stat sb;
- int ret;
- ret = fstat(fd, &sb);
- TEST_ASSERT(!ret, "fstat should succeed");
+ kvm_fstat(fd, &sb);
TEST_ASSERT_EQ(sb.st_size, total_size);
TEST_ASSERT_EQ(sb.st_blksize, page_size);
}
@@ -303,25 +301,22 @@ static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
{
- int fd1, fd2, ret;
+ int fd1, fd2;
struct stat st1, st2;
fd1 = __vm_create_guest_memfd(vm, page_size, 0);
TEST_ASSERT(fd1 != -1, "memfd creation should succeed");
- ret = fstat(fd1, &st1);
- TEST_ASSERT(ret != -1, "memfd fstat should succeed");
+ kvm_fstat(fd1, &st1);
TEST_ASSERT(st1.st_size == page_size, "memfd st_size should match requested size");
fd2 = __vm_create_guest_memfd(vm, page_size * 2, 0);
TEST_ASSERT(fd2 != -1, "memfd creation should succeed");
- ret = fstat(fd2, &st2);
- TEST_ASSERT(ret != -1, "memfd fstat should succeed");
+ kvm_fstat(fd2, &st2);
TEST_ASSERT(st2.st_size == page_size * 2, "second memfd st_size should match requested size");
- ret = fstat(fd1, &st1);
- TEST_ASSERT(ret != -1, "memfd fstat should succeed");
+ kvm_fstat(fd1, &st1);
TEST_ASSERT(st1.st_size == page_size, "first memfd st_size should still match requested size");
TEST_ASSERT(st1.st_ino != st2.st_ino, "different memfd should have different inode numbers");
diff --git a/tools/testing/selftests/kvm/include/kvm_syscalls.h b/tools/testing/selftests/kvm/include/kvm_syscalls.h
index d4e613162bba9..3f039c34e12e0 100644
--- a/tools/testing/selftests/kvm/include/kvm_syscalls.h
+++ b/tools/testing/selftests/kvm/include/kvm_syscalls.h
@@ -2,6 +2,7 @@
#ifndef SELFTEST_KVM_SYSCALLS_H
#define SELFTEST_KVM_SYSCALLS_H
+#include <sys/stat.h>
#include <sys/syscall.h>
#define MAP_ARGS0(m,...)
@@ -77,5 +78,6 @@ __KVM_SYSCALL_DEFINE(munmap, 2, void *, mem, size_t, size);
__KVM_SYSCALL_DEFINE(close, 1, int, fd);
__KVM_SYSCALL_DEFINE(fallocate, 4, int, fd, int, mode, loff_t, offset, loff_t, len);
__KVM_SYSCALL_DEFINE(ftruncate, 2, unsigned int, fd, off_t, length);
+__KVM_SYSCALL_DEFINE(fstat, 2, int, fd, struct stat *, buf);
#endif /* SELFTEST_KVM_SYSCALLS_H */
--
2.53.0.414.gf7e9f6c205-goog
^ permalink raw reply [flat|nested] 7+ messages in thread* [PATCH RFC v2 6/6] KVM: selftests: Test that st_blocks is updated on allocation
2026-02-25 7:20 [PATCH RFC v2 0/6] guest_memfd: Track amount of memory allocated on inode Ackerley Tng
` (4 preceding siblings ...)
2026-02-25 7:20 ` [PATCH RFC v2 5/6] KVM: selftests: Wrap fstat() to assert success Ackerley Tng
@ 2026-02-25 7:20 ` Ackerley Tng
5 siblings, 0 replies; 7+ messages in thread
From: Ackerley Tng @ 2026-02-25 7:20 UTC (permalink / raw)
To: Paolo Bonzini, Andrew Morton, David Hildenbrand, Lorenzo Stoakes,
Liam R. Howlett, Vlastimil Babka, Mike Rapoport,
Suren Baghdasaryan, Michal Hocko, Matthew Wilcox (Oracle),
Shuah Khan, Jonathan Corbet, Alexander Viro, Christian Brauner,
Jan Kara, seanjc, rientjes, rick.p.edgecombe, yan.y.zhao, fvdl,
jthoughton, vannapurve, shivankg, michael.roth, pratyush,
pasha.tatashin, kalyazin, tabba
Cc: kvm, linux-kernel, linux-mm, linux-fsdevel, linux-kselftest,
linux-doc, Ackerley Tng
The st_blocks field reported by fstat should reflect the number of
allocated 512-byte blocks for the guest memfd file.
Extend the fallocate test to verify that st_blocks is correctly updated
when memory is allocated or deallocated via
fallocate(FALLOC_FL_PUNCH_HOLE).
Add checks after each fallocate call to ensure that st_blocks increases on
allocation, decreases when a hole is punched, and is restored when the hole
is re-allocated. Also verify that st_blocks remains unchanged for failing
fallocate calls.
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
---
tools/testing/selftests/kvm/guest_memfd_test.c | 17 +++++++++++++++++
1 file changed, 17 insertions(+)
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index 81387f06e770a..89228d73fa736 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -218,41 +218,58 @@ static void test_file_size(int fd, size_t total_size)
TEST_ASSERT_EQ(sb.st_blksize, page_size);
}
+static void assert_st_blocks_matches_size(int fd, size_t expected_size)
+{
+ struct stat sb;
+
+ kvm_fstat(fd, &sb);
+ TEST_ASSERT_EQ(sb.st_blocks, expected_size / 512);
+}
+
static void test_fallocate(int fd, size_t total_size)
{
int ret;
ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, total_size);
TEST_ASSERT(!ret, "fallocate with aligned offset and size should succeed");
+ assert_st_blocks_matches_size(fd, total_size);
ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
page_size - 1, page_size);
TEST_ASSERT(ret, "fallocate with unaligned offset should fail");
+ assert_st_blocks_matches_size(fd, total_size);
ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, total_size, page_size);
TEST_ASSERT(ret, "fallocate beginning at total_size should fail");
+ assert_st_blocks_matches_size(fd, total_size);
ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, total_size + page_size, page_size);
TEST_ASSERT(ret, "fallocate beginning after total_size should fail");
+ assert_st_blocks_matches_size(fd, total_size);
ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
total_size, page_size);
TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) at total_size should succeed");
+ assert_st_blocks_matches_size(fd, total_size);
ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
total_size + page_size, page_size);
TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) after total_size should succeed");
+ assert_st_blocks_matches_size(fd, total_size);
ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
page_size, page_size - 1);
TEST_ASSERT(ret, "fallocate with unaligned size should fail");
+ assert_st_blocks_matches_size(fd, total_size);
ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
page_size, page_size);
TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) with aligned offset and size should succeed");
+ assert_st_blocks_matches_size(fd, total_size - page_size);
ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, page_size, page_size);
TEST_ASSERT(!ret, "fallocate to restore punched hole should succeed");
+ assert_st_blocks_matches_size(fd, total_size);
}
static void test_invalid_punch_hole(int fd, size_t total_size)
--
2.53.0.414.gf7e9f6c205-goog
^ permalink raw reply [flat|nested] 7+ messages in thread