From: kirill.shutemov@linux.intel.com
To: Kai Huang <kai.huang@intel.com>
Cc: linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
linux-mm@kvack.org, dave.hansen@intel.com, tony.luck@intel.com,
peterz@infradead.org, tglx@linutronix.de, seanjc@google.com,
pbonzini@redhat.com, david@redhat.com, dan.j.williams@intel.com,
rafael.j.wysocki@intel.com, ying.huang@intel.com,
reinette.chatre@intel.com, len.brown@intel.com,
ak@linux.intel.com, isaku.yamahata@intel.com, chao.gao@intel.com,
sathyanarayanan.kuppuswamy@linux.intel.com, bagasdotme@gmail.com,
sagis@google.com, imammedo@redhat.com
Subject: Re: [PATCH v11 12/20] x86/virt/tdx: Allocate and set up PAMTs for TDMRs
Date: Fri, 9 Jun 2023 02:24:52 +0300
Message-ID: <20230608232452.yrx2tekugkvral4z@box>
In-Reply-To: <4e108968c3294189ad150f62df1f146168036342.1685887183.git.kai.huang@intel.com>
On Mon, Jun 05, 2023 at 02:27:25AM +1200, Kai Huang wrote:
> diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
> index fa9fa8bc581a..5f0499ba5d67 100644
> --- a/arch/x86/virt/vmx/tdx/tdx.c
> +++ b/arch/x86/virt/vmx/tdx/tdx.c
> @@ -265,7 +265,7 @@ static int tdx_get_sysinfo(struct tdsysinfo_struct *sysinfo,
> * overlap.
> */
> static int add_tdx_memblock(struct list_head *tmb_list, unsigned long start_pfn,
> - unsigned long end_pfn)
> + unsigned long end_pfn, int nid)
> {
> struct tdx_memblock *tmb;
>
> @@ -276,6 +276,7 @@ static int add_tdx_memblock(struct list_head *tmb_list, unsigned long start_pfn,
> INIT_LIST_HEAD(&tmb->list);
> tmb->start_pfn = start_pfn;
> tmb->end_pfn = end_pfn;
> + tmb->nid = nid;
>
> /* @tmb_list is protected by mem_hotplug_lock */
> list_add_tail(&tmb->list, tmb_list);
> @@ -303,9 +304,9 @@ static void free_tdx_memlist(struct list_head *tmb_list)
> static int build_tdx_memlist(struct list_head *tmb_list)
> {
> unsigned long start_pfn, end_pfn;
> - int i, ret;
> + int i, nid, ret;
>
> - for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
> + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
> /*
> * The first 1MB is not reported as TDX convertible memory.
> * Although the first 1MB is always reserved and won't end up
> @@ -321,7 +322,7 @@ static int build_tdx_memlist(struct list_head *tmb_list)
> * memblock has already guaranteed they are in address
> * ascending order and don't overlap.
> */
> - ret = add_tdx_memblock(tmb_list, start_pfn, end_pfn);
> + ret = add_tdx_memblock(tmb_list, start_pfn, end_pfn, nid);
> if (ret)
> goto err;
> }
These three hunks and the change to struct tdx_memblock look unrelated to this patch.
Why not fold them into 09/20?
> @@ -472,6 +473,202 @@ static int fill_out_tdmrs(struct list_head *tmb_list,
> return 0;
> }
>
> +/*
> + * Calculate PAMT size given a TDMR and a page size. The returned
> + * PAMT size is always aligned up to 4K page boundary.
> + */
> +static unsigned long tdmr_get_pamt_sz(struct tdmr_info *tdmr, int pgsz,
> + u16 pamt_entry_size)
> +{
> + unsigned long pamt_sz, nr_pamt_entries;
> +
> + switch (pgsz) {
> + case TDX_PS_4K:
> + nr_pamt_entries = tdmr->size >> PAGE_SHIFT;
> + break;
> + case TDX_PS_2M:
> + nr_pamt_entries = tdmr->size >> PMD_SHIFT;
> + break;
> + case TDX_PS_1G:
> + nr_pamt_entries = tdmr->size >> PUD_SHIFT;
> + break;
> + default:
> + WARN_ON_ONCE(1);
> + return 0;
> + }
> +
> + pamt_sz = nr_pamt_entries * pamt_entry_size;
> + /* TDX requires PAMT size must be 4K aligned */
> + pamt_sz = ALIGN(pamt_sz, PAGE_SIZE);
> +
> + return pamt_sz;
> +}
> +
> +/*
> + * Locate a NUMA node which should hold the allocation of the @tdmr
> + * PAMT. This node will have some memory covered by the TDMR. The
> + * relative amount of memory covered is not considered.
> + */
> +static int tdmr_get_nid(struct tdmr_info *tdmr, struct list_head *tmb_list)
> +{
> + struct tdx_memblock *tmb;
> +
> + /*
> + * A TDMR must cover at least part of one TMB. That TMB will end
> + * after the TDMR begins. But, that TMB may have started before
> + * the TDMR. Find the next 'tmb' that _ends_ after this TDMR
> + * begins. Ignore 'tmb' start addresses. They are irrelevant.
> + */
> + list_for_each_entry(tmb, tmb_list, list) {
> + if (tmb->end_pfn > PHYS_PFN(tdmr->base))
> + return tmb->nid;
> + }
> +
> + /*
> + * Fall back to allocating the TDMR's metadata from node 0 when
> + * no TDX memory block can be found. This should never happen
> + * since TDMRs originate from TDX memory blocks.
> + */
> + pr_warn("TDMR [0x%llx, 0x%llx): unable to find local NUMA node for PAMT allocation, fallback to use node 0.\n",
> + tdmr->base, tdmr_end(tdmr));
> + return 0;
> +}
> +
> +#define TDX_PS_NR (TDX_PS_1G + 1)
This should live next to the rest of the TDX_PS_* definitions.
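Something like this in tdx.h, grouped with the existing definitions
(assuming the usual 0/1/2 encoding of the TDX page size levels):

	#define TDX_PS_4K	0
	#define TDX_PS_2M	1
	#define TDX_PS_1G	2
	#define TDX_PS_NR	(TDX_PS_1G + 1)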
> +
> +/*
> + * Allocate PAMTs from the local NUMA node of some memory in @tmb_list
> + * within @tdmr, and set up PAMTs for @tdmr.
> + */
> +static int tdmr_set_up_pamt(struct tdmr_info *tdmr,
> + struct list_head *tmb_list,
> + u16 pamt_entry_size)
> +{
> + unsigned long pamt_base[TDX_PS_NR];
> + unsigned long pamt_size[TDX_PS_NR];
> + unsigned long tdmr_pamt_base;
> + unsigned long tdmr_pamt_size;
> + struct page *pamt;
> + int pgsz, nid;
> +
> + nid = tdmr_get_nid(tdmr, tmb_list);
> +
> + /*
> + * Calculate the PAMT size for each TDX supported page size
> + * and the total PAMT size.
> + */
> + tdmr_pamt_size = 0;
> + for (pgsz = TDX_PS_4K; pgsz <= TDX_PS_1G ; pgsz++) {
"< TDX_PS_NR" instead of "<= TDX_PS_1G".
> + pamt_size[pgsz] = tdmr_get_pamt_sz(tdmr, pgsz,
> + pamt_entry_size);
> + tdmr_pamt_size += pamt_size[pgsz];
> + }
> +
> + /*
> + * Allocate one chunk of physically contiguous memory for all
> + * PAMTs. This helps minimize the PAMT's use of reserved areas
> + * in overlapped TDMRs.
> + */
> + pamt = alloc_contig_pages(tdmr_pamt_size >> PAGE_SHIFT, GFP_KERNEL,
> + nid, &node_online_map);
> + if (!pamt)
> + return -ENOMEM;
> +
> + /*
> + * Break the contiguous allocation back up into the
> + * individual PAMTs for each page size.
> + */
> + tdmr_pamt_base = page_to_pfn(pamt) << PAGE_SHIFT;
> + for (pgsz = TDX_PS_4K; pgsz <= TDX_PS_1G; pgsz++) {
> + pamt_base[pgsz] = tdmr_pamt_base;
> + tdmr_pamt_base += pamt_size[pgsz];
> + }
> +
> + tdmr->pamt_4k_base = pamt_base[TDX_PS_4K];
> + tdmr->pamt_4k_size = pamt_size[TDX_PS_4K];
> + tdmr->pamt_2m_base = pamt_base[TDX_PS_2M];
> + tdmr->pamt_2m_size = pamt_size[TDX_PS_2M];
> + tdmr->pamt_1g_base = pamt_base[TDX_PS_1G];
> + tdmr->pamt_1g_size = pamt_size[TDX_PS_1G];
> +
> + return 0;
> +}
> +
> +static void tdmr_get_pamt(struct tdmr_info *tdmr, unsigned long *pamt_pfn,
> + unsigned long *pamt_npages)
> +{
> + unsigned long pamt_base, pamt_sz;
> +
> + /*
> + * The PAMT was allocated in one contiguous unit. The 4K PAMT
> + * should always point to the beginning of that allocation.
> + */
> + pamt_base = tdmr->pamt_4k_base;
> + pamt_sz = tdmr->pamt_4k_size + tdmr->pamt_2m_size + tdmr->pamt_1g_size;
> +
> + *pamt_pfn = PHYS_PFN(pamt_base);
> + *pamt_npages = pamt_sz >> PAGE_SHIFT;
> +}
> +
> +static void tdmr_free_pamt(struct tdmr_info *tdmr)
> +{
> + unsigned long pamt_pfn, pamt_npages;
> +
> + tdmr_get_pamt(tdmr, &pamt_pfn, &pamt_npages);
> +
> + /* Do nothing if PAMT hasn't been allocated for this TDMR */
> + if (!pamt_npages)
> + return;
> +
> + if (WARN_ON_ONCE(!pamt_pfn))
> + return;
> +
> + free_contig_range(pamt_pfn, pamt_npages);
> +}
> +
> +static void tdmrs_free_pamt_all(struct tdmr_info_list *tdmr_list)
> +{
> + int i;
> +
> + for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++)
> + tdmr_free_pamt(tdmr_entry(tdmr_list, i));
> +}
> +
> +/* Allocate and set up PAMTs for all TDMRs */
> +static int tdmrs_set_up_pamt_all(struct tdmr_info_list *tdmr_list,
> + struct list_head *tmb_list,
> + u16 pamt_entry_size)
> +{
> + int i, ret = 0;
> +
> + for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) {
> + ret = tdmr_set_up_pamt(tdmr_entry(tdmr_list, i), tmb_list,
> + pamt_entry_size);
> + if (ret)
> + goto err;
> + }
> +
> + return 0;
> +err:
> + tdmrs_free_pamt_all(tdmr_list);
> + return ret;
> +}
> +
> +static unsigned long tdmrs_count_pamt_pages(struct tdmr_info_list *tdmr_list)
> +{
> + unsigned long pamt_npages = 0;
> + int i;
> +
> + for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) {
> + unsigned long pfn, npages;
> +
> + tdmr_get_pamt(tdmr_entry(tdmr_list, i), &pfn, &npages);
> + pamt_npages += npages;
> + }
> +
> + return pamt_npages;
> +}
> +
> /*
> * Construct a list of TDMRs on the preallocated space in @tdmr_list
> * to cover all TDX memory regions in @tmb_list based on the TDX module
> @@ -487,10 +684,13 @@ static int construct_tdmrs(struct list_head *tmb_list,
> if (ret)
> return ret;
>
> + ret = tdmrs_set_up_pamt_all(tdmr_list, tmb_list,
> + sysinfo->pamt_entry_size);
> + if (ret)
> + return ret;
> /*
> * TODO:
> *
> - * - Allocate and set up PAMTs for each TDMR.
> * - Designate reserved areas for each TDMR.
> *
> * Return -EINVAL until constructing TDMRs is done
> @@ -547,6 +747,11 @@ static int init_tdx_module(void)
> * Return error before all steps are done.
> */
> ret = -EINVAL;
> + if (ret)
> + tdmrs_free_pamt_all(&tdx_tdmr_list);
> + else
> + pr_info("%lu KBs allocated for PAMT.\n",
> + tdmrs_count_pamt_pages(&tdx_tdmr_list) * 4);
"* 4"? This is very cryptic. procfs uses "<< (PAGE_SHIFT - 10)" which
slightly less magic to me. And just make the helper that returns kilobytes
to begin with, if it is the only caller.
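Untested sketch of what I mean (the helper name is just an example):

	/* Sum PAMT memory across all TDMRs, in kilobytes */
	static unsigned long tdmrs_count_pamt_kb(struct tdmr_info_list *tdmr_list)
	{
		unsigned long pamt_npages = 0;
		int i;

		for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) {
			unsigned long pfn, npages;

			tdmr_get_pamt(tdmr_entry(tdmr_list, i), &pfn, &npages);
			pamt_npages += npages;
		}

		/* Pages to kilobytes */
		return pamt_npages << (PAGE_SHIFT - 10);
	}

and then the caller becomes:

		pr_info("%lu KBs allocated for PAMT.\n",
			tdmrs_count_pamt_kb(&tdx_tdmr_list));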
> out_free_tdmrs:
> if (ret)
> free_tdmr_list(&tdx_tdmr_list);
> diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h
> index c20848e76469..e8110e1a9980 100644
> --- a/arch/x86/virt/vmx/tdx/tdx.h
> +++ b/arch/x86/virt/vmx/tdx/tdx.h
> @@ -133,6 +133,7 @@ struct tdx_memblock {
> struct list_head list;
> unsigned long start_pfn;
> unsigned long end_pfn;
> + int nid;
> };
>
> struct tdmr_info_list {
> --
> 2.40.1
>
--
Kiryl Shutsemau / Kirill A. Shutemov