From: Yu Zhao <yuzhao@google.com>
To: Andrew Morton <akpm@linux-foundation.org>,
Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andi Kleen <ak@linux.intel.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Dave Hansen <dave.hansen@linux.intel.com>,
Hillf Danton <hdanton@sina.com>, Jens Axboe <axboe@kernel.dk>,
Jesse Barnes <jsbarnes@google.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Jonathan Corbet <corbet@lwn.net>,
Matthew Wilcox <willy@infradead.org>,
Mel Gorman <mgorman@suse.de>,
Michael Larabel <Michael@michaellarabel.com>,
Michal Hocko <mhocko@kernel.org>,
Rik van Riel <riel@surriel.com>,
Vlastimil Babka <vbabka@suse.cz>, Will Deacon <will@kernel.org>,
Ying Huang <ying.huang@intel.com>,
linux-arm-kernel@lists.infradead.org, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
page-reclaim@google.com, x86@kernel.org,
Yu Zhao <yuzhao@google.com>,
Konstantin Kharlamov <Hi-Angel@yandex.ru>
Subject: [PATCH v6 1/9] mm: x86, arm64: add arch_has_hw_pte_young()
Date: Tue, 4 Jan 2022 13:22:20 -0700
Message-ID: <20220104202227.2903605-2-yuzhao@google.com>
In-Reply-To: <20220104202227.2903605-1-yuzhao@google.com>

Some architectures automatically set the accessed bit in PTEs, e.g.,
x86 and arm64 v8.2. On architectures that lack this capability,
clearing the accessed bit in a PTE usually triggers a page fault
following the TLB miss for that PTE.

Being aware of this capability helps make better decisions, e.g.,
whether to spread the work out over a period of time to avoid bursty
page faults when clearing the accessed bit in a large number of PTEs.
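
As an illustration of the intended use (not part of this patch), a
page table walker could consult this capability to decide whether to
clear the accessed bit in one pass or to rate-limit the work. In the
sketch below, clear_young_batch() and BATCH_SIZE are hypothetical:

	static void clear_young_range(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
	{
		unsigned long addr;

		/* Hardware sets the accessed bit: no page faults to fear. */
		if (arch_has_hw_pte_young(false)) {
			clear_young_batch(mm, start, end);
			return;
		}

		/* Otherwise, spread the work out to avoid bursty page faults. */
		for (addr = start; addr < end; addr += BATCH_SIZE) {
			clear_young_batch(mm, addr, min(addr + BATCH_SIZE, end));
			cond_resched();
		}
	}
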
Signed-off-by: Yu Zhao <yuzhao@google.com>
Tested-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
---
 arch/arm64/include/asm/cpufeature.h |  5 +++++
 arch/arm64/include/asm/pgtable.h    | 13 ++++++++-----
 arch/arm64/kernel/cpufeature.c      | 19 +++++++++++++++++++
 arch/arm64/tools/cpucaps            |  1 +
 arch/x86/include/asm/pgtable.h      |  6 +++---
 include/linux/pgtable.h             | 13 +++++++++++++
 mm/memory.c                         | 14 +-------------
 7 files changed, 50 insertions(+), 21 deletions(-)

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index ef6be92b1921..99518b4b2a9e 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -779,6 +779,11 @@ static inline bool system_supports_tlb_range(void)
 		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
 }
 
+static inline bool system_has_hw_af(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_HW_AFDBM) && cpus_have_const_cap(ARM64_HW_AF);
+}
+
 extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 
 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c4ba047a82d2..e736f47436c7 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -999,13 +999,16 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
  * page after fork() + CoW for pfn mappings. We don't always have a
  * hardware-managed access flag on arm64.
  */
-static inline bool arch_faults_on_old_pte(void)
+static inline bool arch_has_hw_pte_young(bool local)
 {
-	WARN_ON(preemptible());
+	if (local) {
+		WARN_ON(preemptible());
+		return cpu_has_hw_af();
+	}
 
-	return !cpu_has_hw_af();
+	return system_has_hw_af();
 }
-#define arch_faults_on_old_pte		arch_faults_on_old_pte
+#define arch_has_hw_pte_young		arch_has_hw_pte_young
 
 /*
  * Experimentally, it's cheap to set the access flag in hardware and we
@@ -1013,7 +1016,7 @@ static inline bool arch_faults_on_old_pte(void)
  */
 static inline bool arch_wants_old_prefaulted_pte(void)
 {
-	return !arch_faults_on_old_pte();
+	return arch_has_hw_pte_young(true);
 }
 #define arch_wants_old_prefaulted_pte	arch_wants_old_prefaulted_pte
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6f3e677d88f1..5bb553ee2c0e 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2171,6 +2171,25 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_hw_dbm,
 		.cpu_enable = cpu_enable_hw_dbm,
 	},
+	{
+		/*
+		 * __cpu_setup always enables this capability. But if the boot
+		 * CPU has it and a late CPU doesn't, the absent
+		 * ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU will prevent this late CPU
+		 * from going online. No known hardware behaves that way, nor
+		 * is there an obvious reason to design hardware that does, so
+		 * there is no point leaving the door open here. If the need
+		 * arises, a new weak system feature flag should do the trick.
+		 */
+		.desc = "Hardware update of the Access flag",
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.capability = ARM64_HW_AF,
+		.sys_reg = SYS_ID_AA64MMFR1_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64MMFR1_HADBS_SHIFT,
+		.min_field_value = 1,
+		.matches = has_cpuid_feature,
+	},
 #endif
 	{
 		.desc = "CRC32 instructions",
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 870c39537dd0..56e4ef5d95fa 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -36,6 +36,7 @@ HAS_STAGE2_FWB
 HAS_SYSREG_GIC_CPUIF
 HAS_TLB_RANGE
 HAS_VIRT_HOST_EXTN
+HW_AF
 HW_DBM
 KVM_PROTECTED_MODE
 MISMATCHED_CACHE_TYPE
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 448cd01eb3ec..c60b16f8b741 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1397,10 +1397,10 @@ static inline bool arch_has_pfn_modify_check(void)
 	return boot_cpu_has_bug(X86_BUG_L1TF);
 }
 
-#define arch_faults_on_old_pte arch_faults_on_old_pte
-static inline bool arch_faults_on_old_pte(void)
+#define arch_has_hw_pte_young arch_has_hw_pte_young
+static inline bool arch_has_hw_pte_young(bool local)
 {
-	return false;
+	return true;
 }
 
 #endif	/* __ASSEMBLY__ */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index e24d2c992b11..53bd6a26918f 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -258,6 +258,19 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
+#ifndef arch_has_hw_pte_young
+/*
+ * Return whether the accessed bit is supported on the local CPU or system-wide.
+ *
+ * This stub assumes accessing through an old PTE triggers a page fault.
+ * Architectures that automatically set the accessed bit should override it.
+ */
+static inline bool arch_has_hw_pte_young(bool local)
+{
+	return false;
+}
+#endif
+
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				       unsigned long address,
diff --git a/mm/memory.c b/mm/memory.c
index 8f1de811a1dc..ead6c7d4b9a1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -121,18 +121,6 @@ int randomize_va_space __read_mostly =
 					2;
 #endif
 
-#ifndef arch_faults_on_old_pte
-static inline bool arch_faults_on_old_pte(void)
-{
-	/*
-	 * Those arches which don't have hw access flag feature need to
-	 * implement their own helper. By default, "true" means pagefault
-	 * will be hit on old pte.
-	 */
-	return true;
-}
-#endif
-
 #ifndef arch_wants_old_prefaulted_pte
 static inline bool arch_wants_old_prefaulted_pte(void)
 {
@@ -2755,7 +2743,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 	 * On architectures with software "accessed" bits, we would
 	 * take a double page fault, so mark it accessed here.
 	 */
-	if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
+	if (!arch_has_hw_pte_young(true) && !pte_young(vmf->orig_pte)) {
 		pte_t entry;
 
 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
--
2.34.1.448.ga2b2bfdf31-goog