From: Junaid Shahid <junaids@google.com>
To: linux-kernel@vger.kernel.org
Cc: kvm@vger.kernel.org, pbonzini@redhat.com, jmattson@google.com,
pjt@google.com, oweisse@google.com,
alexandre.chartre@oracle.com, rppt@linux.ibm.com,
dave.hansen@linux.intel.com, peterz@infradead.org,
tglx@linutronix.de, luto@kernel.org, linux-mm@kvack.org
Subject: [RFC PATCH 30/47] mm: asi: Add API for mapping userspace address ranges
Date: Tue, 22 Feb 2022 21:22:06 -0800
Message-ID: <20220223052223.1202152-31-junaids@google.com>
In-Reply-To: <20220223052223.1202152-1-junaids@google.com>
asi_map_user()/asi_unmap_user() can be used to map userspace address
ranges into the restricted address space for ASI classes that do not
specify ASI_MAP_ALL_USERSPACE.

In addition, a new structure, asi_pgtbl_pool, allows a set of pages to
be pre-allocated for ASI page tables, so that asi_map_user() does not
have to allocate memory internally, which makes it easier to use that
function while holding locks.
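
For illustration, a minimal usage sketch of the intended calling
pattern follows. The caller, the lock, and the pool size are
hypothetical, not part of this patch:

    #include <asm/asi.h>
    #include <linux/gfp.h>
    #include <linux/spinlock.h>

    /*
     * Hypothetical caller: pre-fill the page table pool while sleeping
     * is still allowed, then map one user page into the restricted
     * address space under a spinlock. asi_map_user() allocates with
     * GFP_NOWAIT and, when a pool is supplied, takes any page tables
     * it needs from that pool, so it is safe with the lock held.
     */
    static int example_map_user_page(struct asi *asi, void *uaddr,
                                     spinlock_t *lock)
    {
            struct asi_pgtbl_pool pool;
            int err;

            asi_init_pgtbl_pool(&pool);

            /* Worst case for one page: a table at each level below the PGD. */
            err = asi_fill_pgtbl_pool(&pool, 4, GFP_KERNEL);
            if (err)
                    return err;

            spin_lock(lock);
            err = asi_map_user(asi, uaddr, PAGE_SIZE, &pool,
                               (size_t)uaddr, (size_t)uaddr + PAGE_SIZE);
            spin_unlock(lock);

            /* Return any pre-allocated page tables that were not consumed. */
            asi_clear_pgtbl_pool(&pool);
            return err;
    }

Passing an allowed_start/allowed_end range wider than the range being
mapped permits huge-page mappings that extend beyond [start, end).
Teardown is asi_unmap_user(), which takes user_map_lock for writing
internally, so it needs no extra locking by the caller.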
Signed-off-by: Junaid Shahid <junaids@google.com>
---
arch/x86/include/asm/asi.h | 19 +++
arch/x86/mm/asi.c | 252 ++++++++++++++++++++++++++++++++++---
include/asm-generic/asi.h | 21 ++++
include/linux/mm_types.h | 2 +-
4 files changed, 275 insertions(+), 19 deletions(-)
diff --git a/arch/x86/include/asm/asi.h b/arch/x86/include/asm/asi.h
index 35421356584b..bdb2f70d4f85 100644
--- a/arch/x86/include/asm/asi.h
+++ b/arch/x86/include/asm/asi.h
@@ -44,6 +44,12 @@ struct asi {
atomic64_t *tlb_gen;
atomic64_t __tlb_gen;
int64_t asi_ref_count;
+ rwlock_t user_map_lock;
+};
+
+struct asi_pgtbl_pool {
+ struct page *pgtbl_list;
+ uint count;
};
DECLARE_PER_CPU_ALIGNED(struct asi_state, asi_cpu_state);
@@ -74,6 +80,19 @@ void asi_do_lazy_map(struct asi *asi, size_t addr);
void asi_clear_user_pgd(struct mm_struct *mm, size_t addr);
void asi_clear_user_p4d(struct mm_struct *mm, size_t addr);
+int asi_map_user(struct asi *asi, void *addr, size_t len,
+ struct asi_pgtbl_pool *pool,
+ size_t allowed_start, size_t allowed_end);
+void asi_unmap_user(struct asi *asi, void *va, size_t len);
+int asi_fill_pgtbl_pool(struct asi_pgtbl_pool *pool, uint count, gfp_t flags);
+void asi_clear_pgtbl_pool(struct asi_pgtbl_pool *pool);
+
+static inline void asi_init_pgtbl_pool(struct asi_pgtbl_pool *pool)
+{
+ pool->pgtbl_list = NULL;
+ pool->count = 0;
+}
+
static inline void asi_init_thread_state(struct thread_struct *thread)
{
thread->intr_nest_depth = 0;
diff --git a/arch/x86/mm/asi.c b/arch/x86/mm/asi.c
index 29c74b6d4262..9b1bd005f343 100644
--- a/arch/x86/mm/asi.c
+++ b/arch/x86/mm/asi.c
@@ -86,6 +86,55 @@ void asi_unregister_class(int index)
}
EXPORT_SYMBOL_GPL(asi_unregister_class);
+static ulong get_pgtbl_from_pool(struct asi_pgtbl_pool *pool)
+{
+ struct page *pgtbl;
+
+ if (pool->count == 0)
+ return 0;
+
+ pgtbl = pool->pgtbl_list;
+ pool->pgtbl_list = pgtbl->asi_pgtbl_pool_next;
+ pgtbl->asi_pgtbl_pool_next = NULL;
+ pool->count--;
+
+ return (ulong)page_address(pgtbl);
+}
+
+static void return_pgtbl_to_pool(struct asi_pgtbl_pool *pool, ulong virt)
+{
+ struct page *pgtbl = virt_to_page(virt);
+
+ pgtbl->asi_pgtbl_pool_next = pool->pgtbl_list;
+ pool->pgtbl_list = pgtbl;
+ pool->count++;
+}
+
+int asi_fill_pgtbl_pool(struct asi_pgtbl_pool *pool, uint count, gfp_t flags)
+{
+ if (!static_cpu_has(X86_FEATURE_ASI))
+ return 0;
+
+ while (pool->count < count) {
+ ulong pgtbl = get_zeroed_page(flags);
+
+ if (!pgtbl)
+ return -ENOMEM;
+
+ return_pgtbl_to_pool(pool, pgtbl);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asi_fill_pgtbl_pool);
+
+void asi_clear_pgtbl_pool(struct asi_pgtbl_pool *pool)
+{
+ while (pool->count > 0)
+ free_page(get_pgtbl_from_pool(pool));
+}
+EXPORT_SYMBOL_GPL(asi_clear_pgtbl_pool);
+
static void asi_clone_pgd(pgd_t *dst_table, pgd_t *src_table, size_t addr)
{
pgd_t *src = pgd_offset_pgd(src_table, addr);
@@ -110,10 +159,12 @@ static void asi_clone_pgd(pgd_t *dst_table, pgd_t *src_table, size_t addr)
#define DEFINE_ASI_PGTBL_ALLOC(base, level) \
static level##_t * asi_##level##_alloc(struct asi *asi, \
base##_t *base, ulong addr, \
- gfp_t flags) \
+ gfp_t flags, \
+ struct asi_pgtbl_pool *pool) \
{ \
if (unlikely(base##_none(*base))) { \
- ulong pgtbl = get_zeroed_page(flags); \
+ ulong pgtbl = pool ? get_pgtbl_from_pool(pool) \
+ : get_zeroed_page(flags); \
phys_addr_t pgtbl_pa; \
\
if (pgtbl == 0) \
@@ -127,7 +178,10 @@ static level##_t * asi_##level##_alloc(struct asi *asi, \
mm_inc_nr_##level##s(asi->mm); \
} else { \
paravirt_release_##level(PHYS_PFN(pgtbl_pa)); \
- free_page(pgtbl); \
+ if (pool) \
+ return_pgtbl_to_pool(pool, pgtbl); \
+ else \
+ free_page(pgtbl); \
} \
\
/* NOP on native. PV call on Xen. */ \
@@ -336,6 +390,7 @@ int asi_init(struct mm_struct *mm, int asi_index, struct asi **out_asi)
asi->class = &asi_class[asi_index];
asi->mm = mm;
asi->pcid_index = asi_index;
+ rwlock_init(&asi->user_map_lock);
if (asi->class->flags & ASI_MAP_STANDARD_NONSENSITIVE) {
uint i;
@@ -650,11 +705,6 @@ static bool follow_physaddr(struct mm_struct *mm, size_t virt,
/*
* Map the given range into the ASI page tables. The source of the mapping
* is the regular unrestricted page tables.
- * Can be used to map any kernel memory.
- *
- * The caller MUST ensure that the source mapping will not change during this
- * function. For dynamic kernel memory, this is generally ensured by mapping
- * the memory within the allocator.
*
* If the source mapping is a large page and the range being mapped spans the
* entire large page, then it will be mapped as a large page in the ASI page
@@ -664,19 +714,17 @@ static bool follow_physaddr(struct mm_struct *mm, size_t virt,
* destination page, but that should be ok for now, as usually in such cases,
* the range would consist of a small-ish number of pages.
*/
-int asi_map_gfp(struct asi *asi, void *addr, size_t len, gfp_t gfp_flags)
+int __asi_map(struct asi *asi, size_t start, size_t end, gfp_t gfp_flags,
+ struct asi_pgtbl_pool *pool,
+ size_t allowed_start, size_t allowed_end)
{
size_t virt;
- size_t start = (size_t)addr;
- size_t end = start + len;
size_t page_size;
- if (!static_cpu_has(X86_FEATURE_ASI) || !asi)
- return 0;
-
VM_BUG_ON(start & ~PAGE_MASK);
- VM_BUG_ON(len & ~PAGE_MASK);
- VM_BUG_ON(start < TASK_SIZE_MAX);
+ VM_BUG_ON(end & ~PAGE_MASK);
+ VM_BUG_ON(end > allowed_end);
+ VM_BUG_ON(start < allowed_start);
gfp_flags &= GFP_RECLAIM_MASK;
@@ -702,14 +750,15 @@ int asi_map_gfp(struct asi *asi, void *addr, size_t len, gfp_t gfp_flags)
continue; \
} \
\
- level = asi_##level##_alloc(asi, base, virt, gfp_flags);\
+ level = asi_##level##_alloc(asi, base, virt, \
+ gfp_flags, pool); \
if (!level) \
return -ENOMEM; \
\
if (page_size >= LEVEL##_SIZE && \
(level##_none(*level) || level##_leaf(*level)) && \
is_page_within_range(virt, LEVEL##_SIZE, \
- start, end)) { \
+ allowed_start, allowed_end)) {\
page_size = LEVEL##_SIZE; \
phys &= LEVEL##_MASK; \
\
@@ -737,6 +786,26 @@ int asi_map_gfp(struct asi *asi, void *addr, size_t len, gfp_t gfp_flags)
return 0;
}
+/*
+ * Maps the given kernel address range into the ASI page tables.
+ *
+ * The caller MUST ensure that the source mapping will not change during this
+ * function. For dynamic kernel memory, this is generally ensured by mapping
+ * the memory within the allocator.
+ */
+int asi_map_gfp(struct asi *asi, void *addr, size_t len, gfp_t gfp_flags)
+{
+ size_t start = (size_t)addr;
+ size_t end = start + len;
+
+ if (!static_cpu_has(X86_FEATURE_ASI) || !asi)
+ return 0;
+
+ VM_BUG_ON(start < TASK_SIZE_MAX);
+
+ return __asi_map(asi, start, end, gfp_flags, NULL, start, end);
+}
+
int asi_map(struct asi *asi, void *addr, size_t len)
{
return asi_map_gfp(asi, addr, len, GFP_KERNEL);
@@ -935,3 +1004,150 @@ void asi_clear_user_p4d(struct mm_struct *mm, size_t addr)
if (!pgtable_l5_enabled())
__asi_clear_user_pgd(mm, addr);
}
+
+/*
+ * Maps the given userspace address range into the ASI page tables.
+ *
+ * The caller MUST ensure that the source mapping will not change during this
+ * function e.g. by synchronizing via MMU notifiers or acquiring the
+ * appropriate locks.
+ */
+int asi_map_user(struct asi *asi, void *addr, size_t len,
+ struct asi_pgtbl_pool *pool,
+ size_t allowed_start, size_t allowed_end)
+{
+ int err;
+ size_t start = (size_t)addr;
+ size_t end = start + len;
+
+ if (!static_cpu_has(X86_FEATURE_ASI) || !asi)
+ return 0;
+
+ VM_BUG_ON(end > TASK_SIZE_MAX);
+
+ read_lock(&asi->user_map_lock);
+ err = __asi_map(asi, start, end, GFP_NOWAIT, pool,
+ allowed_start, allowed_end);
+ read_unlock(&asi->user_map_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(asi_map_user);
+
+static bool
+asi_unmap_free_pte_range(struct asi_pgtbl_pool *pgtbls_to_free,
+ pte_t *pte, size_t addr, size_t end)
+{
+ do {
+ pte_clear(NULL, addr, pte);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+
+ return true;
+}
+
+#define DEFINE_ASI_UNMAP_FREE_RANGE(level, LEVEL, next_level, NEXT_LVL_SIZE) \
+static bool \
+asi_unmap_free_##level##_range(struct asi_pgtbl_pool *pgtbls_to_free, \
+ level##_t *level, size_t addr, size_t end) \
+{ \
+ bool unmapped = false; \
+ size_t next; \
+ \
+ do { \
+ next = level##_addr_end(addr, end); \
+ if (level##_none(*level)) \
+ continue; \
+ \
+ if (IS_ALIGNED(addr, LEVEL##_SIZE) && \
+ IS_ALIGNED(next, LEVEL##_SIZE)) { \
+ if (!level##_large(*level)) { \
+ ulong pgtbl = level##_page_vaddr(*level); \
+ struct page *page = virt_to_page(pgtbl); \
+ \
+ page->private = PG_LEVEL_##NEXT_LVL_SIZE; \
+ return_pgtbl_to_pool(pgtbls_to_free, pgtbl); \
+ } \
+ level##_clear(level); \
+ unmapped = true; \
+ } else { \
+ /* \
+ * At this time, we don't have a case where we need to \
+ * unmap a subset of a huge page. But that could arise \
+ * in the future. In that case, we'll need to split \
+ * the huge mapping here. \
+ */ \
+ if (WARN_ON(level##_large(*level))) \
+ continue; \
+ \
+ unmapped |= asi_unmap_free_##next_level##_range( \
+ pgtbls_to_free, \
+ next_level##_offset(level, addr), \
+ addr, next); \
+ } \
+ } while (level++, addr = next, addr != end); \
+ \
+ return unmapped; \
+}
+
+DEFINE_ASI_UNMAP_FREE_RANGE(pmd, PMD, pte, 4K)
+DEFINE_ASI_UNMAP_FREE_RANGE(pud, PUD, pmd, 2M)
+DEFINE_ASI_UNMAP_FREE_RANGE(p4d, P4D, pud, 1G)
+DEFINE_ASI_UNMAP_FREE_RANGE(pgd, PGDIR, p4d, 512G)
+
+static bool asi_unmap_and_free_range(struct asi_pgtbl_pool *pgtbls_to_free,
+ struct asi *asi, size_t addr, size_t end)
+{
+ size_t next;
+ bool unmapped = false;
+ pgd_t *pgd = pgd_offset_pgd(asi->pgd, addr);
+
+ BUILD_BUG_ON((void *)&((struct page *)NULL)->private ==
+ (void *)&((struct page *)NULL)->asi_pgtbl_pool_next);
+
+ if (pgtable_l5_enabled())
+ return asi_unmap_free_pgd_range(pgtbls_to_free, pgd, addr, end);
+
+ do {
+ next = pgd_addr_end(addr, end);
+ unmapped |= asi_unmap_free_p4d_range(pgtbls_to_free,
+ p4d_offset(pgd, addr),
+ addr, next);
+ } while (pgd++, addr = next, addr != end);
+
+ return unmapped;
+}
+
+void asi_unmap_user(struct asi *asi, void *addr, size_t len)
+{
+ static void (*const free_pgtbl_at_level[])(struct asi *, size_t) = {
+ NULL,
+ asi_free_pte,
+ asi_free_pmd,
+ asi_free_pud,
+ asi_free_p4d
+ };
+
+ struct asi_pgtbl_pool pgtbls_to_free = { 0 };
+ size_t start = (size_t)addr;
+ size_t end = start + len;
+ bool unmapped;
+
+ if (!static_cpu_has(X86_FEATURE_ASI) || !asi)
+ return;
+
+ write_lock(&asi->user_map_lock);
+ unmapped = asi_unmap_and_free_range(&pgtbls_to_free, asi, start, end);
+ write_unlock(&asi->user_map_lock);
+
+ if (unmapped)
+ asi_flush_tlb_range(asi, addr, len);
+
+ while (pgtbls_to_free.count > 0) {
+ size_t pgtbl = get_pgtbl_from_pool(&pgtbls_to_free);
+ struct page *page = virt_to_page(pgtbl);
+
+ VM_BUG_ON(page->private >= PG_LEVEL_NUM);
+ free_pgtbl_at_level[page->private](asi, pgtbl);
+ }
+}
+EXPORT_SYMBOL_GPL(asi_unmap_user);
diff --git a/include/asm-generic/asi.h b/include/asm-generic/asi.h
index 8513d0d7865a..fffb323d2a00 100644
--- a/include/asm-generic/asi.h
+++ b/include/asm-generic/asi.h
@@ -26,6 +26,7 @@
struct asi_hooks {};
struct asi {};
+struct asi_pgtbl_pool {};
static inline
int asi_register_class(const char *name, uint flags,
@@ -92,6 +93,26 @@ void asi_clear_user_pgd(struct mm_struct *mm, size_t addr) { }
static inline
void asi_clear_user_p4d(struct mm_struct *mm, size_t addr) { }
+static inline
+int asi_map_user(struct asi *asi, void *addr, size_t len,
+ struct asi_pgtbl_pool *pool,
+ size_t allowed_start, size_t allowed_end)
+{
+ return 0;
+}
+
+static inline void asi_unmap_user(struct asi *asi, void *va, size_t len) { }
+
+static inline
+int asi_fill_pgtbl_pool(struct asi_pgtbl_pool *pool, uint count, gfp_t flags)
+{
+ return 0;
+}
+
+static inline void asi_clear_pgtbl_pool(struct asi_pgtbl_pool *pool) { }
+
+static inline void asi_init_pgtbl_pool(struct asi_pgtbl_pool *pool) { }
+
static inline
void asi_flush_tlb_range(struct asi *asi, void *addr, size_t len) { }
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7d38229ca85c..c3f209720a84 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -198,7 +198,7 @@ struct page {
/* Links the pages_to_free_async list */
struct llist_node async_free_node;
- unsigned long _asi_pad_1;
+ struct page *asi_pgtbl_pool_next;
u64 asi_tlb_gen;
union {
--
2.35.1.473.g83b2b277ed-goog