From: Pasha Tatashin <pasha.tatashin@soleen.com>
To: pratyush@kernel.org, jasonmiu@google.com, graf@amazon.com,
changyuanl@google.com, pasha.tatashin@soleen.com,
rppt@kernel.org, dmatlack@google.com, rientjes@google.com,
corbet@lwn.net, rdunlap@infradead.org,
ilpo.jarvinen@linux.intel.com, kanie@linux.alibaba.com,
ojeda@kernel.org, aliceryhl@google.com, masahiroy@kernel.org,
akpm@linux-foundation.org, tj@kernel.org, yoann.congal@smile.fr,
mmaurer@google.com, roman.gushchin@linux.dev,
chenridong@huawei.com, axboe@kernel.dk, mark.rutland@arm.com,
jannh@google.com, vincent.guittot@linaro.org, hannes@cmpxchg.org,
dan.j.williams@intel.com, david@redhat.com,
joel.granados@kernel.org, rostedt@goodmis.org,
anna.schumaker@oracle.com, song@kernel.org,
zhangguopeng@kylinos.cn, linux@weissschuh.net,
linux-kernel@vger.kernel.org, linux-doc@vger.kernel.org,
linux-mm@kvack.org, gregkh@linuxfoundation.org,
tglx@linutronix.de, mingo@redhat.com, bp@alien8.de,
dave.hansen@linux.intel.com, x86@kernel.org, hpa@zytor.com,
rafael@kernel.org, dakr@kernel.org,
bartosz.golaszewski@linaro.org, cw00.choi@samsung.com,
myungjoo.ham@samsung.com, yesanishhere@gmail.com,
Jonathan.Cameron@huawei.com, quic_zijuhu@quicinc.com,
aleksander.lobakin@intel.com, ira.weiny@intel.com,
andriy.shevchenko@linux.intel.com, leon@kernel.org,
lukas@wunner.de, bhelgaas@google.com, wagi@kernel.org,
djeffery@redhat.com, stuart.w.hayes@gmail.com, ptyadav@amazon.de,
lennart@poettering.net, brauner@kernel.org,
linux-api@vger.kernel.org, linux-fsdevel@vger.kernel.org,
saeedm@nvidia.com, ajayachandra@nvidia.com, jgg@nvidia.com,
parav@nvidia.com, leonro@nvidia.com, witu@nvidia.com
Subject: [PATCH v3 29/30] luo: allow preserving memfd
Date: Thu, 7 Aug 2025 01:44:35 +0000 [thread overview]
Message-ID: <20250807014442.3829950-30-pasha.tatashin@soleen.com> (raw)
In-Reply-To: <20250807014442.3829950-1-pasha.tatashin@soleen.com>
From: Pratyush Yadav <ptyadav@amazon.de>
The ability to preserve a memfd allows userspace to use KHO and LUO to
transfer its memory contents to the next kernel. This is useful in many
ways. For one, it can be used with IOMMUFD as the backing store for
IOMMU page tables. Preserving IOMMUFD is essential for performing a
hypervisor live update with passthrough devices. memfd support provides
the first building block for making that possible.
For another, for applications with a large amount of memory that takes
time to reconstruct, reboots to consume kernel upgrades can be very
expensive. memfd with LUO gives those applications reboot-persistent
memory that they can use to quickly save and reconstruct that state.
While memfd is backed by either hugetlbfs or shmem, currently only
support on shmem is added. To be more precise, support for anonymous
shmem files is added.
The handover to the next kernel is not transparent. Not all properties
of the file are preserved; only its memory contents, position, and
size are. The recreated file gets the UID and GID of the task doing the
restore, and the task's cgroup gets charged with the memory.
After LUO is in prepared state, the file cannot grow or shrink, and all
its pages are pinned to avoid migrations and swapping. The file can
still be read from or written to.
Co-developed-by: Changyuan Lyu <changyuanl@google.com>
Signed-off-by: Changyuan Lyu <changyuanl@google.com>
Co-developed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Pratyush Yadav <ptyadav@amazon.de>
---
MAINTAINERS | 2 +
mm/Makefile | 1 +
mm/memfd_luo.c | 507 +++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 510 insertions(+)
create mode 100644 mm/memfd_luo.c
diff --git a/MAINTAINERS b/MAINTAINERS
index b88b77977649..7421d21672f3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14209,6 +14209,7 @@ F: tools/testing/selftests/livepatch/
LIVE UPDATE
M: Pasha Tatashin <pasha.tatashin@soleen.com>
+R: Pratyush Yadav <pratyush@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-kernel-liveupdate
@@ -14218,6 +14219,7 @@ F: Documentation/userspace-api/liveupdate.rst
F: include/linux/liveupdate.h
F: include/uapi/linux/liveupdate.h
F: kernel/liveupdate/
+F: mm/memfd_luo.c
F: tools/testing/selftests/liveupdate/
LLC (802.2)
diff --git a/mm/Makefile b/mm/Makefile
index ef54aa615d9d..0a9936ffc172 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_NUMA) += memory-tiers.o
obj-$(CONFIG_DEVICE_MIGRATION) += migrate_device.o
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o
obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
+obj-$(CONFIG_LIVEUPDATE) += memfd_luo.o
obj-$(CONFIG_MEMCG_V1) += memcontrol-v1.o
obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o
ifdef CONFIG_SWAP
diff --git a/mm/memfd_luo.c b/mm/memfd_luo.c
new file mode 100644
index 000000000000..0c91b40a2080
--- /dev/null
+++ b/mm/memfd_luo.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ * Changyuan Lyu <changyuanl@google.com>
+ *
+ * Copyright (C) 2025 Amazon.com Inc. or its affiliates.
+ * Pratyush Yadav <ptyadav@amazon.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/file.h>
+#include <linux/io.h>
+#include <linux/libfdt.h>
+#include <linux/liveupdate.h>
+#include <linux/kexec_handover.h>
+#include <linux/shmem_fs.h>
+#include <linux/bits.h>
+#include "internal.h"
+
+static const char memfd_luo_compatible[] = "memfd-v1";
+
+#define PRESERVED_PFN_MASK GENMASK(63, 12)
+#define PRESERVED_PFN_SHIFT 12
+#define PRESERVED_FLAG_DIRTY BIT(0)
+#define PRESERVED_FLAG_UPTODATE BIT(1)
+
+#define PRESERVED_FOLIO_PFN(desc) (((desc) & PRESERVED_PFN_MASK) >> PRESERVED_PFN_SHIFT)
+#define PRESERVED_FOLIO_FLAGS(desc) ((desc) & ~PRESERVED_PFN_MASK)
+#define PRESERVED_FOLIO_MKDESC(pfn, flags) (((pfn) << PRESERVED_PFN_SHIFT) | (flags))
+
+/* Per-folio record serialized into the "folios" FDT property. */
+struct memfd_luo_preserved_folio {
+	/*
+	 * The folio descriptor is made of 2 parts. The bottom 12 bits are used
+	 * for storing flags, the others for storing the PFN.
+	 */
+	u64 foliodesc;
+	/* Page offset of the folio within the file (folio->index). */
+	u64 index;
+};
+
+/*
+ * Preserve @nr_folios folios with KHO and fill one descriptor per folio in
+ * @pfolios (PFN + dirty/uptodate flags, plus the file index).
+ *
+ * Returns 0 on success. On failure, folios preserved so far are unpreserved
+ * again and a negative errno is returned.
+ */
+static int memfd_luo_preserve_folios(struct memfd_luo_preserved_folio *pfolios,
+				     struct folio **folios,
+				     unsigned int nr_folios)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < nr_folios; i++) {
+		struct memfd_luo_preserved_folio *pfolio = &pfolios[i];
+		struct folio *folio = folios[i];
+		unsigned int flags = 0;
+		unsigned long pfn;
+
+		err = kho_preserve_folio(folio);
+		if (err)
+			goto err_unpreserve;
+
+		pfn = folio_pfn(folio);
+		if (folio_test_dirty(folio))
+			flags |= PRESERVED_FLAG_DIRTY;
+		if (folio_test_uptodate(folio))
+			flags |= PRESERVED_FLAG_UPTODATE;
+
+		pfolio->foliodesc = PRESERVED_FOLIO_MKDESC(pfn, flags);
+		pfolio->index = folio->index;
+	}
+
+	return 0;
+
+err_unpreserve:
+	/*
+	 * 'i' is unsigned, so "for (; i >= 0; i--)" would never terminate
+	 * (the condition is always true and i would wrap at 0).
+	 * "while (i--)" visits i-1 .. 0 and then stops.
+	 */
+	while (i--)
+		WARN_ON_ONCE(kho_unpreserve_folio(folios[i]));
+	return err;
+}
+
+/*
+ * Undo what memfd_luo_preserve_folios() did: drop the KHO preservation and
+ * the pin for every descriptor in @pfolios. Empty slots are skipped.
+ */
+static void memfd_luo_unpreserve_folios(const struct memfd_luo_preserved_folio *pfolios,
+					unsigned int nr_folios)
+{
+	unsigned int i;
+
+	for (i = 0; i < nr_folios; i++) {
+		u64 desc = pfolios[i].foliodesc;
+		struct folio *folio;
+
+		/* A zero descriptor means the slot was never filled. */
+		if (!desc)
+			continue;
+
+		folio = pfn_folio(PRESERVED_FOLIO_PFN(desc));
+		kho_unpreserve_folio(folio);
+		unpin_folio(folio);
+	}
+}
+
+/*
+ * Allocate a folio large enough for @size bytes and start an FDT in it,
+ * leaving the root node open for the caller to add properties.
+ *
+ * Returns the FDT buffer, or NULL on allocation/FDT failure.
+ */
+static void *memfd_luo_create_fdt(unsigned long size)
+{
+	unsigned int order = get_order(size);
+	struct folio *folio;
+	void *fdt;
+
+	if (order > MAX_PAGE_ORDER)
+		return NULL;
+
+	folio = folio_alloc(GFP_KERNEL, order);
+	if (!folio)
+		return NULL;
+
+	fdt = folio_address(folio);
+	if (fdt_create(fdt, PAGE_SIZE << order) ||
+	    fdt_finish_reservemap(fdt) ||
+	    fdt_begin_node(fdt, "")) {
+		folio_put(folio);
+		return NULL;
+	}
+
+	return fdt;
+}
+
+/* Close the root node opened by memfd_luo_create_fdt() and seal the FDT. */
+static int memfd_luo_finish_fdt(void *fdt)
+{
+	int err = fdt_end_node(fdt);
+
+	return err ? err : fdt_finish(fdt);
+}
+
+/*
+ * LUO .prepare callback: freeze the memfd, pin all its folios, preserve them
+ * with KHO, and serialize pos/size/folio descriptors into an FDT whose
+ * physical address is handed back through @data.
+ *
+ * Returns 0 on success or a negative errno; on failure all state (pins,
+ * preservations, freeze, lock) is rolled back.
+ */
+static int memfd_luo_prepare(struct liveupdate_file_handler *handler,
+			     struct file *file, u64 *data)
+{
+	struct memfd_luo_preserved_folio *preserved_folios;
+	struct inode *inode = file_inode(file);
+	unsigned int max_folios, nr_folios = 0;
+	int err = 0, preserved_size;
+	struct folio **folios;
+	long size, nr_pinned;
+	pgoff_t offset;
+	void *fdt;
+	u64 pos;
+
+	if (WARN_ON_ONCE(!shmem_file(file)))
+		return -EINVAL;
+
+	inode_lock(inode);
+	shmem_i_mapping_freeze(inode, true);
+
+	size = i_size_read(inode);
+	if ((PAGE_ALIGN(size) / PAGE_SIZE) > UINT_MAX) {
+		err = -E2BIG;
+		/* The mapping was frozen above, so it must be unfrozen here. */
+		goto err_unfreeze;
+	}
+
+	/*
+	 * Guess the number of folios based on inode size. Real number might end
+	 * up being smaller if there are higher order folios.
+	 */
+	max_folios = PAGE_ALIGN(size) / PAGE_SIZE;
+	folios = kvmalloc_array(max_folios, sizeof(*folios), GFP_KERNEL);
+	if (!folios) {
+		err = -ENOMEM;
+		goto err_unfreeze;
+	}
+
+	/*
+	 * Pin the folios so they don't move around behind our back. This also
+	 * ensures none of the folios are in CMA -- which ensures they don't
+	 * fall in KHO scratch memory. It also moves swapped out folios back to
+	 * memory.
+	 *
+	 * A side effect of doing this is that it allocates a folio for all
+	 * indices in the file. This might waste memory on sparse memfds. If
+	 * that is really a problem in the future, we can have a
+	 * memfd_pin_folios() variant that does not allocate a page on empty
+	 * slots.
+	 */
+	nr_pinned = memfd_pin_folios(file, 0, size - 1, folios, max_folios,
+				     &offset);
+	if (nr_pinned < 0) {
+		err = nr_pinned;
+		pr_err("failed to pin folios: %d\n", err);
+		goto err_free_folios;
+	}
+	/* nr_pinned won't be more than max_folios which is also unsigned int. */
+	nr_folios = (unsigned int)nr_pinned;
+
+	/*
+	 * check_mul_overflow() validates and performs the multiplication in
+	 * one step; no unchecked multiply is done beforehand (the previous
+	 * unchecked product could itself overflow the int before the check).
+	 */
+	if (check_mul_overflow(sizeof(struct memfd_luo_preserved_folio),
+			       nr_folios, &preserved_size)) {
+		err = -E2BIG;
+		goto err_unpin;
+	}
+
+	/*
+	 * Most of the space should be taken by preserved folios. So take its
+	 * size, plus a page for other properties.
+	 */
+	fdt = memfd_luo_create_fdt(PAGE_ALIGN(preserved_size) + PAGE_SIZE);
+	if (!fdt) {
+		err = -ENOMEM;
+		goto err_unpin;
+	}
+
+	pos = file->f_pos;
+	err = fdt_property(fdt, "pos", &pos, sizeof(pos));
+	if (err)
+		goto err_free_fdt;
+
+	err = fdt_property(fdt, "size", &size, sizeof(size));
+	if (err)
+		goto err_free_fdt;
+
+	err = fdt_property_placeholder(fdt, "folios", preserved_size,
+				       (void **)&preserved_folios);
+	if (err) {
+		pr_err("Failed to reserve folios property in FDT: %s\n",
+		       fdt_strerror(err));
+		err = -ENOMEM;
+		goto err_free_fdt;
+	}
+
+	err = memfd_luo_preserve_folios(preserved_folios, folios, nr_folios);
+	if (err)
+		goto err_free_fdt;
+
+	err = memfd_luo_finish_fdt(fdt);
+	if (err)
+		goto err_unpreserve;
+
+	err = kho_preserve_folio(virt_to_folio(fdt));
+	if (err)
+		goto err_unpreserve;
+
+	kvfree(folios);
+	inode_unlock(inode);
+
+	*data = virt_to_phys(fdt);
+	return 0;
+
+err_unpreserve:
+	/*
+	 * memfd_luo_unpreserve_folios() already drops the pins taken above,
+	 * so skip err_unpin to avoid unpinning the same folios twice.
+	 */
+	memfd_luo_unpreserve_folios(preserved_folios, nr_folios);
+	folio_put(virt_to_folio(fdt));
+	goto err_free_folios;
+err_free_fdt:
+	folio_put(virt_to_folio(fdt));
+err_unpin:
+	unpin_folios(folios, nr_pinned);
+err_free_folios:
+	kvfree(folios);
+err_unfreeze:
+	shmem_i_mapping_freeze(inode, false);
+	inode_unlock(inode);
+	return err;
+}
+
+/*
+ * LUO .freeze callback: refresh the serialized file position right before
+ * the kexec. Only "pos" can have changed since prepare; the size is fixed
+ * while the mapping is frozen.
+ */
+static int memfd_luo_freeze(struct liveupdate_file_handler *handler,
+			    struct file *file, u64 *data)
+{
+	u64 pos = file->f_pos;
+	void *fdt;
+
+	if (WARN_ON_ONCE(!*data))
+		return -EINVAL;
+
+	fdt = phys_to_virt(*data);
+
+	return fdt_setprop(fdt, 0, "pos", &pos, sizeof(pos));
+}
+
+/*
+ * LUO .cancel callback: roll back a successful prepare. Unfreeze the
+ * mapping, unpreserve/unpin every folio recorded in the FDT, and release
+ * the FDT folio itself.
+ */
+static void memfd_luo_cancel(struct liveupdate_file_handler *handler,
+			     struct file *file, u64 data)
+{
+	const struct memfd_luo_preserved_folio *pfolios;
+	struct inode *inode = file_inode(file);
+	struct folio *fdt_folio;
+	int len;
+
+	if (WARN_ON_ONCE(!data))
+		return;
+
+	inode_lock(inode);
+	shmem_i_mapping_freeze(inode, false);
+
+	fdt_folio = virt_to_folio(phys_to_virt(data));
+	pfolios = fdt_getprop(folio_address(fdt_folio), 0, "folios", &len);
+	if (pfolios)
+		memfd_luo_unpreserve_folios(pfolios, len / sizeof(*pfolios));
+
+	kho_unpreserve_folio(fdt_folio);
+	folio_put(fdt_folio);
+	inode_unlock(inode);
+}
+
+/* @data carries the physical address of the serialized FDT; restore its folio. */
+static struct folio *memfd_luo_get_fdt(u64 data)
+{
+	return kho_restore_folio((phys_addr_t)data);
+}
+
+/*
+ * LUO .finish callback: release preserved state in the new kernel when the
+ * file was never retrieved (@reclaimed is false). Unpreserves every folio
+ * recorded in the FDT and drops the FDT folio.
+ */
+static void memfd_luo_finish(struct liveupdate_file_handler *handler,
+			     struct file *file, u64 data, bool reclaimed)
+{
+	const struct memfd_luo_preserved_folio *pfolios;
+	struct folio *fdt_folio;
+	int len;
+
+	/* Nothing to do: retrieve already consumed the preserved state. */
+	if (reclaimed)
+		return;
+
+	/*
+	 * kho_restore_folio() can fail (memfd_luo_retrieve() checks for it),
+	 * so do not dereference an unchecked result.
+	 */
+	fdt_folio = memfd_luo_get_fdt(data);
+	if (WARN_ON_ONCE(!fdt_folio))
+		return;
+
+	pfolios = fdt_getprop(folio_address(fdt_folio), 0, "folios", &len);
+	if (pfolios)
+		memfd_luo_unpreserve_folios(pfolios, len / sizeof(*pfolios));
+
+	folio_put(fdt_folio);
+}
+
+/*
+ * LUO .retrieve callback: recreate the memfd in the new kernel from the
+ * FDT referenced by @data. Restores each preserved folio from KHO and
+ * inserts it into a fresh shmem file at its saved index, then restores the
+ * saved position and size.
+ *
+ * Returns 0 and sets *@file_p on success; negative errno on failure, in
+ * which case all not-yet-inserted folios are restored and freed.
+ */
+static int memfd_luo_retrieve(struct liveupdate_file_handler *handler, u64 data,
+			      struct file **file_p)
+{
+	const struct memfd_luo_preserved_folio *pfolios;
+	int nr_pfolios, len, ret = 0, i = 0;
+	struct address_space *mapping;
+	struct folio *folio, *fdt_folio;
+	const u64 *pos, *size;
+	struct inode *inode;
+	struct file *file;
+	const void *fdt;
+
+	fdt_folio = memfd_luo_get_fdt(data);
+	if (!fdt_folio)
+		return -ENOENT;
+
+	fdt = page_to_virt(folio_page(fdt_folio, 0));
+
+	pfolios = fdt_getprop(fdt, 0, "folios", &len);
+	if (!pfolios || len % sizeof(*pfolios)) {
+		pr_err("invalid 'folios' property\n");
+		ret = -EINVAL;
+		goto put_fdt;
+	}
+	nr_pfolios = len / sizeof(*pfolios);
+
+	size = fdt_getprop(fdt, 0, "size", &len);
+	if (!size || len != sizeof(u64)) {
+		pr_err("invalid 'size' property\n");
+		ret = -EINVAL;
+		goto put_folios;
+	}
+
+	pos = fdt_getprop(fdt, 0, "pos", &len);
+	if (!pos || len != sizeof(u64)) {
+		pr_err("invalid 'pos' property\n");
+		ret = -EINVAL;
+		goto put_folios;
+	}
+
+	file = shmem_file_setup("", 0, VM_NORESERVE);
+	if (IS_ERR(file)) {
+		ret = PTR_ERR(file);
+		pr_err("failed to setup file: %d\n", ret);
+		goto put_folios;
+	}
+
+	inode = file->f_inode;
+	mapping = inode->i_mapping;
+	vfs_setpos(file, *pos, MAX_LFS_FILESIZE);
+
+	for (; i < nr_pfolios; i++) {
+		const struct memfd_luo_preserved_folio *pfolio = &pfolios[i];
+		phys_addr_t phys;
+		u64 index;
+		int flags;
+
+		/* Empty slot: the index was a hole in the original file. */
+		if (!pfolio->foliodesc)
+			continue;
+
+		phys = PFN_PHYS(PRESERVED_FOLIO_PFN(pfolio->foliodesc));
+		folio = kho_restore_folio(phys);
+		if (!folio) {
+			pr_err("Unable to restore folio at physical address: %llx\n",
+			       phys);
+			/* Must not fall through with ret == 0 (false success). */
+			ret = -ENOENT;
+			goto put_file;
+		}
+		index = pfolio->index;
+		flags = PRESERVED_FOLIO_FLAGS(pfolio->foliodesc);
+
+		/* Set up the folio for insertion. */
+		/*
+		 * TODO: Should find a way to unify this and
+		 * shmem_alloc_and_add_folio().
+		 */
+		__folio_set_locked(folio);
+		__folio_set_swapbacked(folio);
+
+		ret = mem_cgroup_charge(folio, NULL, mapping_gfp_mask(mapping));
+		if (ret) {
+			pr_err("shmem: failed to charge folio index %d: %d\n",
+			       i, ret);
+			goto unlock_folio;
+		}
+
+		ret = shmem_add_to_page_cache(folio, mapping, index, NULL,
+					      mapping_gfp_mask(mapping));
+		if (ret) {
+			pr_err("shmem: failed to add to page cache folio index %d: %d\n",
+			       i, ret);
+			goto unlock_folio;
+		}
+
+		if (flags & PRESERVED_FLAG_UPTODATE)
+			folio_mark_uptodate(folio);
+		if (flags & PRESERVED_FLAG_DIRTY)
+			folio_mark_dirty(folio);
+
+		ret = shmem_inode_acct_blocks(inode, 1);
+		if (ret) {
+			pr_err("shmem: failed to account folio index %d: %d\n",
+			       i, ret);
+			goto unlock_folio;
+		}
+
+		shmem_recalc_inode(inode, 1, 0);
+		folio_add_lru(folio);
+		folio_unlock(folio);
+		folio_put(folio);
+	}
+
+	inode->i_size = *size;
+	*file_p = file;
+	folio_put(fdt_folio);
+	return 0;
+
+unlock_folio:
+	folio_unlock(folio);
+	folio_put(folio);
+put_file:
+	fput(file);
+	i++;
+put_folios:
+	/*
+	 * Restore the remaining folios from KHO (so their memory is not
+	 * leaked) and free them; they were never inserted into the file.
+	 */
+	for (; i < nr_pfolios; i++) {
+		const struct memfd_luo_preserved_folio *pfolio = &pfolios[i];
+
+		/* Skip holes, matching the restore loop above. */
+		if (!pfolio->foliodesc)
+			continue;
+
+		/* kho_restore_folio() takes a physical address, not a PFN. */
+		folio = kho_restore_folio(PFN_PHYS(PRESERVED_FOLIO_PFN(pfolio->foliodesc)));
+		if (folio)
+			folio_put(folio);
+	}
+
+put_fdt:
+	folio_put(fdt_folio);
+	return ret;
+}
+
+/* Only anonymous (unlinked) shmem-backed files -- i.e. memfds -- qualify. */
+static bool memfd_luo_can_preserve(struct liveupdate_file_handler *handler,
+				   struct file *file)
+{
+	return shmem_file(file) && !file_inode(file)->i_nlink;
+}
+
+/* Callback table wiring memfd preservation into the LUO framework. */
+static const struct liveupdate_file_ops memfd_luo_file_ops = {
+	.prepare = memfd_luo_prepare,
+	.freeze = memfd_luo_freeze,
+	.cancel = memfd_luo_cancel,
+	.finish = memfd_luo_finish,
+	.retrieve = memfd_luo_retrieve,
+	.can_preserve = memfd_luo_can_preserve,
+	.owner = THIS_MODULE,
+};
+
+/* Handler matched across kernels by its compatible string ("memfd-v1"). */
+static struct liveupdate_file_handler memfd_luo_handler = {
+	.ops = &memfd_luo_file_ops,
+	.compatible = memfd_luo_compatible,
+};
+
+/* Register the memfd handler with LUO at late init. */
+static int __init memfd_luo_init(void)
+{
+	int err = liveupdate_register_file_handler(&memfd_luo_handler);
+
+	if (err)
+		pr_err("Could not register luo filesystem handler: %d\n", err);
+
+	return err;
+}
+late_initcall(memfd_luo_init);
--
2.50.1.565.gc32cd1483b-goog
next prev parent reply other threads:[~2025-08-07 1:46 UTC|newest]
Thread overview: 147+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-08-07 1:44 [PATCH v3 00/30] Live Update Orchestrator Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 01/30] kho: init new_physxa->phys_bits to fix lockdep Pasha Tatashin
2025-08-08 11:42 ` Pratyush Yadav
2025-08-08 11:52 ` Pratyush Yadav
2025-08-08 14:00 ` Pasha Tatashin
2025-08-08 19:06 ` Andrew Morton
2025-08-08 19:51 ` Pasha Tatashin
2025-08-08 20:19 ` Pasha Tatashin
2025-08-14 13:11 ` Jason Gunthorpe
2025-08-14 14:57 ` Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 02/30] kho: mm: Don't allow deferred struct page with KHO Pasha Tatashin
2025-08-08 11:47 ` Pratyush Yadav
2025-08-08 14:01 ` Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 03/30] kho: warn if KHO is disabled due to an error Pasha Tatashin
2025-08-08 11:48 ` Pratyush Yadav
2025-08-07 1:44 ` [PATCH v3 04/30] kho: allow to drive kho from within kernel Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 05/30] kho: make debugfs interface optional Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 06/30] kho: drop notifiers Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 07/30] kho: add interfaces to unpreserve folios and physical memory ranges Pasha Tatashin
2025-08-14 13:22 ` Jason Gunthorpe
2025-08-14 15:05 ` Pasha Tatashin
2025-08-14 17:01 ` Jason Gunthorpe
2025-08-15 9:12 ` Mike Rapoport
2025-08-18 13:55 ` Jason Gunthorpe
2025-09-21 22:20 ` Pasha Tatashin
2025-09-25 5:26 ` Mike Rapoport
2025-08-07 1:44 ` [PATCH v3 08/30] kho: don't unpreserve memory during abort Pasha Tatashin
2025-08-14 13:30 ` Jason Gunthorpe
2025-09-22 14:57 ` Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 09/30] liveupdate: kho: move to kernel/liveupdate Pasha Tatashin
2025-08-30 8:35 ` Mike Rapoport
2025-09-22 14:54 ` Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 10/30] liveupdate: luo_core: luo_ioctl: Live Update Orchestrator Pasha Tatashin
2025-08-14 13:31 ` Jason Gunthorpe
2025-09-22 15:00 ` Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 11/30] liveupdate: luo_core: integrate with KHO Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 12/30] liveupdate: luo_subsystems: add subsystem registration Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 13/30] liveupdate: luo_subsystems: implement subsystem callbacks Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 14/30] liveupdate: luo_files: add infrastructure for FDs Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 15/30] liveupdate: luo_files: implement file systems callbacks Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 16/30] liveupdate: luo_ioctl: add userpsace interface Pasha Tatashin
2025-08-14 13:49 ` Jason Gunthorpe
2025-09-22 21:09 ` Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 17/30] liveupdate: luo_files: luo_ioctl: Unregister all FDs on device close Pasha Tatashin
2025-08-27 15:34 ` Pratyush Yadav
2025-09-22 21:23 ` Pasha Tatashin
2025-09-23 13:13 ` Pratyush Yadav
2025-08-07 1:44 ` [PATCH v3 18/30] liveupdate: luo_files: luo_ioctl: Add ioctls for per-file state management Pasha Tatashin
2025-08-14 14:02 ` Jason Gunthorpe
2025-09-22 23:17 ` Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 19/30] liveupdate: luo_sysfs: add sysfs state monitoring Pasha Tatashin
2025-08-26 16:03 ` Jason Gunthorpe
2025-08-26 18:58 ` Pasha Tatashin
2025-10-09 1:07 ` yanjun.zhu
2025-10-09 5:20 ` Greg KH
2025-10-09 10:58 ` Pratyush Yadav
2025-10-09 12:01 ` Pasha Tatashin
2025-10-09 14:50 ` Jason Gunthorpe
2025-10-09 15:34 ` Zhu Yanjun
2025-10-09 17:04 ` Pasha Tatashin
2025-10-09 17:30 ` Yanjun.Zhu
2025-10-09 17:56 ` Yanjun.Zhu
2025-10-09 23:12 ` Pratyush Yadav
2025-10-10 6:39 ` Greg KH
2025-08-07 1:44 ` [PATCH v3 20/30] reboot: call liveupdate_reboot() before kexec Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 21/30] kho: move kho debugfs directory to liveupdate Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 22/30] liveupdate: add selftests for subsystems un/registration Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 23/30] selftests/liveupdate: add subsystem/state tests Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 24/30] docs: add luo documentation Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 25/30] MAINTAINERS: add liveupdate entry Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 26/30] mm: shmem: use SHMEM_F_* flags instead of VM_* flags Pasha Tatashin
2025-08-11 23:11 ` Vipin Sharma
2025-08-13 12:42 ` Pratyush Yadav
2025-08-07 1:44 ` [PATCH v3 27/30] mm: shmem: allow freezing inode mapping Pasha Tatashin
2025-08-07 1:44 ` [PATCH v3 28/30] mm: shmem: export some functions to internal.h Pasha Tatashin
2025-08-07 1:44 ` Pasha Tatashin [this message]
2025-08-08 20:22 ` [PATCH v3 29/30] luo: allow preserving memfd Pasha Tatashin
2025-08-13 12:44 ` Pratyush Yadav
2025-08-13 6:34 ` Vipin Sharma
2025-08-13 7:09 ` Greg KH
2025-08-13 12:02 ` Pratyush Yadav
2025-08-13 12:14 ` Greg KH
2025-08-13 12:41 ` Jason Gunthorpe
2025-08-13 13:00 ` Greg KH
2025-08-13 13:37 ` Pratyush Yadav
2025-08-13 13:41 ` Pasha Tatashin
2025-08-13 13:53 ` Greg KH
2025-08-13 13:53 ` Greg KH
2025-08-13 20:03 ` Jason Gunthorpe
2025-08-13 13:31 ` Pratyush Yadav
2025-08-13 12:29 ` Pratyush Yadav
2025-08-13 13:49 ` Pasha Tatashin
2025-08-13 13:55 ` Pratyush Yadav
2025-08-26 16:20 ` Jason Gunthorpe
2025-08-27 15:03 ` Pratyush Yadav
2025-08-28 12:43 ` Jason Gunthorpe
2025-08-28 23:00 ` Chris Li
2025-09-01 17:10 ` Pratyush Yadav
2025-09-02 13:48 ` Jason Gunthorpe
2025-09-03 14:10 ` Pratyush Yadav
2025-09-03 15:01 ` Jason Gunthorpe
2025-09-04 12:57 ` Pratyush Yadav
2025-09-04 14:42 ` Jason Gunthorpe
2025-09-09 14:53 ` Pratyush Yadav
2025-09-09 15:40 ` Pasha Tatashin
2025-09-09 15:54 ` Jason Gunthorpe
2025-09-09 16:30 ` Pasha Tatashin
2025-09-09 16:57 ` Jason Gunthorpe
2025-09-09 17:27 ` Pasha Tatashin
2025-09-09 15:56 ` Pratyush Yadav
2025-09-09 16:25 ` Pasha Tatashin
2025-08-28 7:14 ` Mike Rapoport
2025-08-29 18:47 ` Chris Li
2025-08-29 19:18 ` Chris Li
2025-09-02 13:41 ` Jason Gunthorpe
2025-09-03 12:01 ` Chris Li
2025-09-04 17:34 ` Jason Gunthorpe
2025-09-09 14:48 ` Pratyush Yadav
2025-09-01 16:23 ` Mike Rapoport
2025-09-01 16:54 ` Pasha Tatashin
2025-09-01 17:21 ` Pratyush Yadav
2025-09-01 19:02 ` Pasha Tatashin
2025-09-02 11:38 ` Jason Gunthorpe
2025-09-03 15:59 ` Pasha Tatashin
2025-09-03 16:40 ` Jason Gunthorpe
2025-09-03 19:29 ` Mike Rapoport
2025-09-02 11:58 ` Mike Rapoport
2025-09-01 17:01 ` Pratyush Yadav
2025-09-02 11:44 ` Mike Rapoport
2025-09-03 14:17 ` Pratyush Yadav
2025-09-03 19:39 ` Mike Rapoport
2025-09-04 12:39 ` Pratyush Yadav
2025-08-07 1:44 ` [PATCH v3 30/30] docs: add documentation for memfd preservation via LUO Pasha Tatashin
2025-08-08 12:07 ` [PATCH v3 00/30] Live Update Orchestrator David Hildenbrand
2025-08-08 12:24 ` Pratyush Yadav
2025-08-08 13:53 ` Pasha Tatashin
2025-08-08 13:52 ` Pasha Tatashin
2025-08-26 13:16 ` Pratyush Yadav
2025-08-26 13:54 ` Pasha Tatashin
2025-08-26 14:24 ` Jason Gunthorpe
2025-08-26 15:02 ` Pasha Tatashin
2025-08-26 15:13 ` Jason Gunthorpe
2025-08-26 16:10 ` Pasha Tatashin
2025-08-26 16:22 ` Jason Gunthorpe
2025-08-26 17:03 ` Pasha Tatashin
2025-08-26 17:08 ` Jason Gunthorpe
2025-08-27 14:01 ` Pratyush Yadav
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250807014442.3829950-30-pasha.tatashin@soleen.com \
--to=pasha.tatashin@soleen.com \
--cc=Jonathan.Cameron@huawei.com \
--cc=ajayachandra@nvidia.com \
--cc=akpm@linux-foundation.org \
--cc=aleksander.lobakin@intel.com \
--cc=aliceryhl@google.com \
--cc=andriy.shevchenko@linux.intel.com \
--cc=anna.schumaker@oracle.com \
--cc=axboe@kernel.dk \
--cc=bartosz.golaszewski@linaro.org \
--cc=bhelgaas@google.com \
--cc=bp@alien8.de \
--cc=brauner@kernel.org \
--cc=changyuanl@google.com \
--cc=chenridong@huawei.com \
--cc=corbet@lwn.net \
--cc=cw00.choi@samsung.com \
--cc=dakr@kernel.org \
--cc=dan.j.williams@intel.com \
--cc=dave.hansen@linux.intel.com \
--cc=david@redhat.com \
--cc=djeffery@redhat.com \
--cc=dmatlack@google.com \
--cc=graf@amazon.com \
--cc=gregkh@linuxfoundation.org \
--cc=hannes@cmpxchg.org \
--cc=hpa@zytor.com \
--cc=ilpo.jarvinen@linux.intel.com \
--cc=ira.weiny@intel.com \
--cc=jannh@google.com \
--cc=jasonmiu@google.com \
--cc=jgg@nvidia.com \
--cc=joel.granados@kernel.org \
--cc=kanie@linux.alibaba.com \
--cc=lennart@poettering.net \
--cc=leon@kernel.org \
--cc=leonro@nvidia.com \
--cc=linux-api@vger.kernel.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux@weissschuh.net \
--cc=lukas@wunner.de \
--cc=mark.rutland@arm.com \
--cc=masahiroy@kernel.org \
--cc=mingo@redhat.com \
--cc=mmaurer@google.com \
--cc=myungjoo.ham@samsung.com \
--cc=ojeda@kernel.org \
--cc=parav@nvidia.com \
--cc=pratyush@kernel.org \
--cc=ptyadav@amazon.de \
--cc=quic_zijuhu@quicinc.com \
--cc=rafael@kernel.org \
--cc=rdunlap@infradead.org \
--cc=rientjes@google.com \
--cc=roman.gushchin@linux.dev \
--cc=rostedt@goodmis.org \
--cc=rppt@kernel.org \
--cc=saeedm@nvidia.com \
--cc=song@kernel.org \
--cc=stuart.w.hayes@gmail.com \
--cc=tglx@linutronix.de \
--cc=tj@kernel.org \
--cc=vincent.guittot@linaro.org \
--cc=wagi@kernel.org \
--cc=witu@nvidia.com \
--cc=x86@kernel.org \
--cc=yesanishhere@gmail.com \
--cc=yoann.congal@smile.fr \
--cc=zhangguopeng@kylinos.cn \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox