From: bibo mao <maobibo@loongson.cn>
To: Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
Anshuman Khandual <anshuman.khandual@arm.com>,
Andrew Morton <akpm@linux-foundation.org>,
Mike Kravetz <mike.kravetz@oracle.com>
Cc: linux-mips@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-mm@kvack.org, Bibo Mao <maobibo@loongson.cn>
Subject: [PATCH 2/2] hugetlb: use lightweight tlb flush when update huge tlb on mips
Date: Mon, 29 Jun 2020 21:15:33 +0800 [thread overview]
Message-ID: <1593436533-8645-2-git-send-email-maobibo@loongson.cn> (raw)
In-Reply-To: <1593436533-8645-1-git-send-email-maobibo@loongson.cn>
From: Bibo Mao <maobibo@loongson.cn>
On the MIPS platform, a huge pte points to invalid_pte_table if
huge_pte_none returns true. A TLB entry with normal page size is
added if the huge pte entry is none. When updating a huge pte entry,
the older TLB entry with a normal page needs to be invalidated.
This patch uses the lightweight TLB flush function local_flush_tlb_page,
rather than flush_tlb_range, which would flush all TLB entries.
This patch also adds a new huge TLB update function named
update_mmu_cache_huge; the page-faulting address is passed to it rather
than the huge page start address.
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
arch/mips/include/asm/hugetlb.h | 17 ++++++++++++-----
include/linux/hugetlb.h | 9 +++++++++
mm/hugetlb.c | 12 +++++++-----
3 files changed, 28 insertions(+), 10 deletions(-)
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index c214440..fce09b4 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -72,15 +72,22 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
if (changed) {
set_pte_at(vma->vm_mm, addr, ptep, pte);
- /*
- * There could be some standard sized pages in there,
- * get them all.
- */
- flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
}
return changed;
}
+#define update_mmu_cache_huge update_mmu_cache_huge
+static inline void update_mmu_cache_huge(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ /*
+ * There could be some standard sized page in there,
+ * parameter address must be page faulting address rather than
+ * start address of huge page
+ */
+ local_flush_tlb_page(vma, address);
+ update_mmu_cache(vma, address & huge_page_mask(hstate_vma(vma)), ptep);
+}
#include <asm-generic/hugetlb.h>
#endif /* __ASM_HUGETLB_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 858522e..2f3f9eb 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -746,6 +746,15 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
}
#endif
+#ifndef update_mmu_cache_huge
+#define update_mmu_cache_huge update_mmu_cache_huge
+static inline void update_mmu_cache_huge(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ update_mmu_cache(vma, address & huge_page_mask(hstate_vma(vma)), ptep);
+}
+#endif
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1410e62..96faad7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3757,10 +3757,12 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t entry;
+ struct hstate *h = hstate_vma(vma);
+ unsigned long haddr = address & huge_page_mask(h);
entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
- if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
- update_mmu_cache(vma, address, ptep);
+ if (huge_ptep_set_access_flags(vma, haddr, ptep, entry, 1))
+ update_mmu_cache_huge(vma, address, ptep);
}
bool is_hugetlb_entry_migration(pte_t pte)
@@ -4128,7 +4130,7 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
* and just make the page writable */
if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
page_move_anon_rmap(old_page, vma);
- set_huge_ptep_writable(vma, haddr, ptep);
+ set_huge_ptep_writable(vma, address, ptep);
return 0;
}
@@ -4630,7 +4632,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
entry = pte_mkyoung(entry);
if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
flags & FAULT_FLAG_WRITE))
- update_mmu_cache(vma, haddr, ptep);
+ update_mmu_cache_huge(vma, address, ptep);
out_put_page:
if (page != pagecache_page)
unlock_page(page);
@@ -4770,7 +4772,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
hugetlb_count_add(pages_per_huge_page(h), dst_mm);
/* No need to invalidate - it was non-present before */
- update_mmu_cache(dst_vma, dst_addr, dst_pte);
+ update_mmu_cache_huge(dst_vma, dst_addr, dst_pte);
spin_unlock(ptl);
set_page_huge_active(page);
--
1.8.3.1
next prev parent reply other threads:[~2020-06-29 13:15 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-06-29 13:15 [PATCH 1/2] hugetlb: clear huge pte during flush function on mips platform bibo mao
2020-06-29 13:15 ` bibo mao [this message]
2021-05-11 21:03 ` Thomas Bogendoerfer
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1593436533-8645-2-git-send-email-maobibo@loongson.cn \
--to=maobibo@loongson.cn \
--cc=akpm@linux-foundation.org \
--cc=anshuman.khandual@arm.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mips@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mike.kravetz@oracle.com \
--cc=tsbogend@alpha.franken.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox