[PATCH 1/2] xtensa: mm: fix PMD folding implementation
From: Mike Rapoport @ 2019-11-05 14:33 UTC
To: Chris Zankel, Max Filippov
Cc: linux-xtensa, linux-mm, linux-kernel, Mike Rapoport, Mike Rapoport
From: Mike Rapoport <rppt@linux.ibm.com>
There was a definition of pmd_offset() in arch/xtensa/include/asm/pgtable.h
that shadowed the generic implementation defined in
include/asm-generic/pgtable-nopmd.h.
As a result, several page table traversals in xtensa took shortcuts instead of
unfolding the folded levels properly.
Remove local override for pmd_offset() and add page table unfolding where
necessary.
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
---
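For reference, the generic folding that the local macro shadowed looks roughly
like the following. This is a simplified sketch of
include/asm-generic/pgtable-nopmd.h, not a verbatim copy: with the PMD folded,
the pud-level checks become no-ops and pmd_offset() merely reinterprets the pud
pointer it is given.

    /* Simplified sketch of the generic PMD folding (pgtable-nopmd.h). */
    static inline int pud_none(pud_t pud)    { return 0; }
    static inline int pud_bad(pud_t pud)     { return 0; }
    static inline int pud_present(pud_t pud) { return 1; }

    /* With the PMD folded, the "pmd" is just the pud entry itself. */
    static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
    {
    	return (pmd_t *)pud;
    }

The xtensa macro removed below took a pgd_t * instead, expanding
pmd_offset(dir, address) to ((pmd_t *)(dir)), so callers passed a pgd pointer
straight through and skipped the pud level entirely.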
arch/xtensa/include/asm/pgtable.h | 3 ---
arch/xtensa/mm/fault.c | 10 ++++++++--
arch/xtensa/mm/kasan_init.c | 6 ++++--
arch/xtensa/mm/mmu.c | 3 ++-
arch/xtensa/mm/tlb.c | 6 +++++-
5 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 3f7fe5a..af72f02 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -371,9 +371,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir,address) ((pmd_t*)(dir))
-
/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index f81b147..68a0414 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -197,6 +197,7 @@ void do_page_fault(struct pt_regs *regs)
struct mm_struct *act_mm = current->active_mm;
int index = pgd_index(address);
pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
@@ -211,8 +212,13 @@ void do_page_fault(struct pt_regs *regs)
pgd_val(*pgd) = pgd_val(*pgd_k);
- pmd = pmd_offset(pgd, address);
- pmd_k = pmd_offset(pgd_k, address);
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud) || !pud_present(*pud_k))
+ goto bad_page_fault;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_page_fault;
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index af71525..ace98bd 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -20,7 +20,8 @@ void __init kasan_early_init(void)
{
unsigned long vaddr = KASAN_SHADOW_START;
pgd_t *pgd = pgd_offset_k(vaddr);
- pmd_t *pmd = pmd_offset(pgd, vaddr);
+ pud_t *pud = pud_offset(pgd, vaddr);
+ pmd_t *pmd = pmd_offset(pud, vaddr);
int i;
for (i = 0; i < PTRS_PER_PTE; ++i)
@@ -42,7 +43,8 @@ static void __init populate(void *start, void *end)
unsigned long i, j;
unsigned long vaddr = (unsigned long)start;
pgd_t *pgd = pgd_offset_k(vaddr);
- pmd_t *pmd = pmd_offset(pgd, vaddr);
+ pud_t *pud = pud_offset(pgd, vaddr);
+ pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
if (!pte)
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 03678c4..018dda2 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -22,7 +22,8 @@
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
pgd_t *pgd = pgd_offset_k(vaddr);
- pmd_t *pmd = pmd_offset(pgd, vaddr);
+ pud_t *pud = pud_offset(pgd, vaddr);
+ pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte;
unsigned long i;
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 59153d0..164a2ca 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -169,6 +169,7 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
struct task_struct *task = get_current();
struct mm_struct *mm = task->mm;
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -177,7 +178,10 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
pgd = pgd_offset(mm, vaddr);
if (pgd_none_or_clear_bad(pgd))
return 0;
- pmd = pmd_offset(pgd, vaddr);
+ pud = pud_offset(pgd, vaddr);
+ if (pud_none_or_clear_bad(pud))
+ return 0;
+ pmd = pmd_offset(pud, vaddr);
if (pmd_none_or_clear_bad(pmd))
return 0;
pte = pte_offset_map(pmd, vaddr);
--
2.7.4
[PATCH 2/2] xtensa: get rid of __ARCH_USE_5LEVEL_HACK
From: Mike Rapoport @ 2019-11-05 14:33 UTC
To: Chris Zankel, Max Filippov
Cc: linux-xtensa, linux-mm, linux-kernel, Mike Rapoport, Mike Rapoport
From: Mike Rapoport <rppt@linux.ibm.com>
xtensa has 2-level page tables and already uses pgtable-nopmd for page
table folding.
Add walks of the p4d level where appropriate and drop the usage of
__ARCH_USE_5LEVEL_HACK.
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
---
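With __ARCH_USE_5LEVEL_HACK gone, traversals step through every level
explicitly and rely on the folded p4d/pud/pmd helpers collapsing to no-ops at
compile time on xtensa's 2-level layout. A minimal illustrative sketch of the
resulting walk, mirroring the pattern used in the hunks below (walk_to_pte is
a hypothetical helper name, not part of this patch):

    /* Illustrative full walk after dropping __ARCH_USE_5LEVEL_HACK. */
    static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long vaddr)
    {
    	pgd_t *pgd = pgd_offset(mm, vaddr);
    	p4d_t *p4d;
    	pud_t *pud;
    	pmd_t *pmd;

    	if (pgd_none_or_clear_bad(pgd))
    		return NULL;
    	p4d = p4d_offset(pgd, vaddr);	/* folded: returns pgd cast */
    	if (p4d_none_or_clear_bad(p4d))
    		return NULL;
    	pud = pud_offset(p4d, vaddr);	/* folded: returns p4d cast */
    	if (pud_none_or_clear_bad(pud))
    		return NULL;
    	pmd = pmd_offset(pud, vaddr);	/* folded: returns pud cast */
    	if (pmd_none_or_clear_bad(pmd))
    		return NULL;
    	return pte_offset_map(pmd, vaddr);	/* real second level */
    }

Only the pgd and pte levels exist in the hardware page tables; the
intermediate offsets are pointer casts that the compiler optimizes away, so
the explicit walk costs nothing at run time.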
arch/xtensa/include/asm/pgtable.h | 1 -
arch/xtensa/mm/fault.c | 10 ++++++++--
arch/xtensa/mm/kasan_init.c | 6 ++++--
arch/xtensa/mm/mmu.c | 3 ++-
arch/xtensa/mm/tlb.c | 5 ++++-
5 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index af72f02..27ac17c 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -8,7 +8,6 @@
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm/page.h>
#include <asm/kmem_layout.h>
#include <asm-generic/pgtable-nopmd.h>
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 68a0414..bee30a7 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -197,6 +197,7 @@ void do_page_fault(struct pt_regs *regs)
struct mm_struct *act_mm = current->active_mm;
int index = pgd_index(address);
pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
@@ -212,8 +213,13 @@ void do_page_fault(struct pt_regs *regs)
pgd_val(*pgd) = pgd_val(*pgd_k);
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
+ goto bad_page_fault;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud) || !pud_present(*pud_k))
goto bad_page_fault;
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index ace98bd..9c95779 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -20,7 +20,8 @@ void __init kasan_early_init(void)
{
unsigned long vaddr = KASAN_SHADOW_START;
pgd_t *pgd = pgd_offset_k(vaddr);
- pud_t *pud = pud_offset(pgd, vaddr);
+ p4d_t *p4d = p4d_offset(pgd, vaddr);
+ pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
int i;
@@ -43,7 +44,8 @@ static void __init populate(void *start, void *end)
unsigned long i, j;
unsigned long vaddr = (unsigned long)start;
pgd_t *pgd = pgd_offset_k(vaddr);
- pud_t *pud = pud_offset(pgd, vaddr);
+ p4d_t *p4d = p4d_offset(pgd, vaddr);
+ pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 018dda2..37e478a 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -22,7 +22,8 @@
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
pgd_t *pgd = pgd_offset_k(vaddr);
- pud_t *pud = pud_offset(pgd, vaddr);
+ p4d_t *p4d = p4d_offset(pgd, vaddr);
+ pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte;
unsigned long i;
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 164a2ca..a460474 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -178,7 +178,10 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
pgd = pgd_offset(mm, vaddr);
if (pgd_none_or_clear_bad(pgd))
return 0;
- pud = pud_offset(pgd, vaddr);
+ p4d = p4d_offset(pgd, vaddr);
+ if (p4d_none_or_clear_bad(p4d))
+ return 0;
+ pud = pud_offset(p4d, vaddr);
if (pud_none_or_clear_bad(pud))
return 0;
pmd = pmd_offset(pud, vaddr);
--
2.7.4