On 16.05.25 13:09, Kirill A. Shutemov wrote:
> On Fri, May 16, 2025 at 12:42:21PM +0200, Jürgen Groß wrote:
>> On 16.05.25 11:15, Kirill A. Shutemov wrote:
>>> Both Intel and AMD CPUs support 5-level paging, which is expected to
>>> become more widely adopted in the future.
>>>
>>> Remove CONFIG_X86_5LEVEL and the ifdeffery for it to make the code
>>> more readable.
>>>
>>> Signed-off-by: Kirill A. Shutemov
>>> Suggested-by: Borislav Petkov
>>> ---
>>>  Documentation/arch/x86/cpuinfo.rst            |  8 +++----
>>>  .../arch/x86/x86_64/5level-paging.rst         |  9 --------
>>>  arch/x86/Kconfig                              | 22 +------------------
>>>  arch/x86/Kconfig.cpufeatures                  |  4 ----
>>>  arch/x86/boot/compressed/pgtable_64.c         | 11 ++--------
>>>  arch/x86/boot/header.S                        |  4 ----
>>>  arch/x86/boot/startup/map_kernel.c            |  5 +----
>>>  arch/x86/include/asm/page_64.h                |  2 --
>>>  arch/x86/include/asm/page_64_types.h          |  7 ------
>>>  arch/x86/include/asm/pgtable_64_types.h       | 18 ---------------
>>>  arch/x86/kernel/alternative.c                 |  2 +-
>>>  arch/x86/kernel/head64.c                      |  2 --
>>>  arch/x86/kernel/head_64.S                     |  2 --
>>>  arch/x86/mm/init.c                            |  4 ----
>>>  arch/x86/mm/pgtable.c                         |  2 +-
>>>  drivers/firmware/efi/libstub/x86-5lvl.c       |  2 +-
>>>  16 files changed, 10 insertions(+), 94 deletions(-)
>>
>> There are some instances of:
>>
>> #if CONFIG_PGTABLE_LEVELS >= 5
>>
>> in 64-bit-only code under arch/x86, which could be simplified, too.
>>
>> They are still correct, but I wanted to hint at further code removals
>> being possible.
>
> Okay, fair enough. Fixup is below.
>
> Did I miss anything else?

Yes. One more instance in arch/x86/xen/mmu_pv.c, one in
arch/x86/include/asm/paravirt.h, one in
arch/x86/include/asm/paravirt_types.h, and one in
arch/x86/kernel/paravirt.c.


Juergen

>
> diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
> index 2fb7d53cf333..c9103a6fa06e 100644
> --- a/arch/x86/entry/vsyscall/vsyscall_64.c
> +++ b/arch/x86/entry/vsyscall/vsyscall_64.c
> @@ -341,9 +341,7 @@ void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
>  	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
>  	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
>  	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
> -#if CONFIG_PGTABLE_LEVELS >= 5
>  	set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
> -#endif
>  	pud = pud_offset(p4d, VSYSCALL_ADDR);
>  	set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
>  	pmd = pmd_offset(pud, VSYSCALL_ADDR);
> diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
> index b89f8f1194a9..f06e5d6a2747 100644
> --- a/arch/x86/include/asm/pgtable_64.h
> +++ b/arch/x86/include/asm/pgtable_64.h
> @@ -41,11 +41,9 @@ static inline void sync_initial_page_table(void) { }
>  	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
>  	       __FILE__, __LINE__, &(e), pud_val(e))
>
> -#if CONFIG_PGTABLE_LEVELS >= 5
>  #define p4d_ERROR(e)					\
>  	pr_err("%s:%d: bad p4d %p(%016lx)\n",		\
>  	       __FILE__, __LINE__, &(e), p4d_val(e))
> -#endif
>
>  #define pgd_ERROR(e)					\
>  	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
> diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
> index 38971c6dcd4b..61c52bb80e33 100644
> --- a/arch/x86/xen/mmu_pv.c
> +++ b/arch/x86/xen/mmu_pv.c
> @@ -578,7 +578,6 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val)
>  	xen_mc_issue(XEN_LAZY_MMU);
>  }
>
> -#if CONFIG_PGTABLE_LEVELS >= 5
>  __visible p4dval_t xen_p4d_val(p4d_t p4d)
>  {
>  	return pte_mfn_to_pfn(p4d.p4d);
> @@ -592,7 +591,6 @@ __visible p4d_t xen_make_p4d(p4dval_t p4d)
>  {
>  	return native_make_p4d(p4d);
>  }
>  PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
> -#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
>
>  static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
>  			 void (*func)(struct mm_struct *mm, struct page *,
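
The hunks above all follow one pattern: after this series,
CONFIG_PGTABLE_LEVELS is fixed at 5 for 64-bit x86 builds, so any
"#if CONFIG_PGTABLE_LEVELS >= 5" guard in 64-bit-only code is always
satisfied and can be dropped together with its #endif. Below is a
minimal standalone sketch of that pattern; example_p4d_t and
example_p4d_val() are hypothetical stand-ins, not the kernel's real
p4d_t and p4d_val(), used only so the snippet compiles on its own:

/*
 * Sketch only: example_p4d_t and example_p4d_val() stand in for the
 * kernel's p4d_t and p4d_val() to keep the snippet self-contained.
 */
#define CONFIG_PGTABLE_LEVELS 5		/* fixed at 5 on x86-64 after the series */

typedef struct { unsigned long p4d; } example_p4d_t;

#if CONFIG_PGTABLE_LEVELS >= 5		/* now always true in 64-bit-only code... */
static inline unsigned long example_p4d_val(example_p4d_t p4d)
{
	return p4d.p4d;			/* unwrap the raw page-table entry value */
}
#endif					/* ...so the guard and #endif can go */

Preprocessing such a snippet with and without the #if/#endif pair
yields identical output once CONFIG_PGTABLE_LEVELS is pinned at 5,
which is why these removals are pure cleanup with no behavioral
change.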