From: James Houghton <jthoughton@google.com>
To: Sean Christopherson <seanjc@google.com>,
Paolo Bonzini <pbonzini@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
David Matlack <dmatlack@google.com>,
David Rientjes <rientjes@google.com>,
James Houghton <jthoughton@google.com>,
Jason Gunthorpe <jgg@ziepe.ca>, Jonathan Corbet <corbet@lwn.net>,
Marc Zyngier <maz@kernel.org>,
Oliver Upton <oliver.upton@linux.dev>,
Wei Xu <weixugc@google.com>, Yu Zhao <yuzhao@google.com>,
Axel Rasmussen <axelrasmussen@google.com>,
kvm@vger.kernel.org, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH v7 09/18] KVM: x86/mmu: Add support for lockless walks of rmap SPTEs
Date: Thu, 26 Sep 2024 01:34:57 +0000
Message-ID: <20240926013506.860253-10-jthoughton@google.com>
In-Reply-To: <20240926013506.860253-1-jthoughton@google.com>

From: Sean Christopherson <seanjc@google.com>

Add a lockless version of for_each_rmap_spte(), which is pretty much the
same as the normal version, except that it doesn't BUG() the host if a
non-present SPTE is encountered.  When mmu_lock is held, it should be
impossible for a different task to zap a SPTE, _and_ zapped SPTEs must
be removed from their rmap chain prior to dropping mmu_lock.  Thus, the
normal walker BUG()s if a non-present SPTE is encountered, as something
is wildly broken.

When walking rmaps without holding mmu_lock, the SPTEs pointed at by the
rmap chain can be zapped/dropped, and so a lockless walk can observe a
non-present SPTE if it runs concurrently with a different operation that
is zapping SPTEs.

Signed-off-by: Sean Christopherson <seanjc@google.com>
[jthoughton: Added lockdep assertion for kvm_rmap_lock, synchronization fixup]
Signed-off-by: James Houghton <jthoughton@google.com>
---
arch/x86/kvm/mmu/mmu.c | 75 +++++++++++++++++++++++-------------------
1 file changed, 42 insertions(+), 33 deletions(-)
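
A reviewer-facing sketch, not part of the patch: the helper below,
example_rmap_test_age(), is made up purely to illustrate how the new
lockless walker is intended to be used; for_each_rmap_spte_lockless(),
struct rmap_iterator, mmu_spte_get_lockless(), and is_accessed_spte()
are the names added or already present in mmu.c and spte.h.

    /*
     * Sketch only: walk an rmap chain without holding mmu_lock.  The
     * lockless walker reads each SPTE once via mmu_spte_get_lockless()
     * and silently skips entries that are zapped concurrently, whereas
     * for_each_rmap_spte() WARNs if it ever sees a non-present SPTE.
     */
    static bool example_rmap_test_age(struct kvm_rmap_head *rmap_head)
    {
            struct rmap_iterator iter;
            u64 *sptep;
            u64 spte;
            bool young = false;

            for_each_rmap_spte_lockless(rmap_head, &iter, sptep, spte)
                    young |= is_accessed_spte(spte);

            return young;
    }
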
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 79676798ba77..72c682fa207a 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -932,7 +932,7 @@ static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu
*/
#define KVM_RMAP_LOCKED BIT(1)
-static unsigned long kvm_rmap_lock(struct kvm_rmap_head *rmap_head)
+static unsigned long __kvm_rmap_lock(struct kvm_rmap_head *rmap_head)
{
unsigned long old_val, new_val;
@@ -976,14 +976,25 @@ static unsigned long kvm_rmap_lock(struct kvm_rmap_head *rmap_head)
*/
} while (!atomic_long_try_cmpxchg_acquire(&rmap_head->val, &old_val, new_val));
- /* Return the old value, i.e. _without_ the LOCKED bit set. */
+ /*
+ * Return the old value, i.e. _without_ the LOCKED bit set. It's
+ * impossible for the return value to be 0 (see above), i.e. the read-
+ * only unlock flow can't get a false positive and fail to unlock.
+ */
return old_val;
}
+static unsigned long kvm_rmap_lock(struct kvm *kvm,
+ struct kvm_rmap_head *rmap_head)
+{
+ lockdep_assert_held_write(&kvm->mmu_lock);
+ return __kvm_rmap_lock(rmap_head);
+}
+
static void kvm_rmap_unlock(struct kvm_rmap_head *rmap_head,
unsigned long new_val)
{
- WARN_ON_ONCE(new_val & KVM_RMAP_LOCKED);
+ KVM_MMU_WARN_ON(new_val & KVM_RMAP_LOCKED);
/*
* Ensure that all accesses to the rmap have completed
* before we actually unlock the rmap.
@@ -1023,14 +1034,14 @@ static void kvm_rmap_unlock_readonly(struct kvm_rmap_head *rmap_head,
/*
* Returns the number of pointers in the rmap chain, not counting the new one.
*/
-static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
- struct kvm_rmap_head *rmap_head)
+static int pte_list_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ u64 *spte, struct kvm_rmap_head *rmap_head)
{
unsigned long old_val, new_val;
struct pte_list_desc *desc;
int count = 0;
- old_val = kvm_rmap_lock(rmap_head);
+ old_val = kvm_rmap_lock(kvm, rmap_head);
if (!old_val) {
new_val = (unsigned long)spte;
@@ -1110,7 +1121,7 @@ static void pte_list_remove(struct kvm *kvm, u64 *spte,
unsigned long rmap_val;
int i;
- rmap_val = kvm_rmap_lock(rmap_head);
+ rmap_val = kvm_rmap_lock(kvm, rmap_head);
if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_val, kvm))
goto out;
@@ -1154,7 +1165,7 @@ static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
unsigned long rmap_val;
int i;
- rmap_val = kvm_rmap_lock(rmap_head);
+ rmap_val = kvm_rmap_lock(kvm, rmap_head);
if (!rmap_val)
return false;
@@ -1246,23 +1257,18 @@ static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
struct rmap_iterator *iter)
{
unsigned long rmap_val = kvm_rmap_get(rmap_head);
- u64 *sptep;
if (!rmap_val)
return NULL;
if (!(rmap_val & KVM_RMAP_MANY)) {
iter->desc = NULL;
- sptep = (u64 *)rmap_val;
- goto out;
+ return (u64 *)rmap_val;
}
iter->desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
iter->pos = 0;
- sptep = iter->desc->sptes[iter->pos];
-out:
- BUG_ON(!is_shadow_present_pte(*sptep));
- return sptep;
+ return iter->desc->sptes[iter->pos];
}
/*
@@ -1272,14 +1278,11 @@ static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
*/
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
- u64 *sptep;
-
if (iter->desc) {
if (iter->pos < PTE_LIST_EXT - 1) {
++iter->pos;
- sptep = iter->desc->sptes[iter->pos];
- if (sptep)
- goto out;
+ if (iter->desc->sptes[iter->pos])
+ return iter->desc->sptes[iter->pos];
}
iter->desc = iter->desc->more;
@@ -1287,20 +1290,24 @@ static u64 *rmap_get_next(struct rmap_iterator *iter)
if (iter->desc) {
iter->pos = 0;
/* desc->sptes[0] cannot be NULL */
- sptep = iter->desc->sptes[iter->pos];
- goto out;
+ return iter->desc->sptes[iter->pos];
}
}
return NULL;
-out:
- BUG_ON(!is_shadow_present_pte(*sptep));
- return sptep;
}
-#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) \
- for (_spte_ = rmap_get_first(_rmap_head_, _iter_); \
- _spte_; _spte_ = rmap_get_next(_iter_))
+#define __for_each_rmap_spte(_rmap_head_, _iter_, _sptep_) \
+ for (_sptep_ = rmap_get_first(_rmap_head_, _iter_); \
+ _sptep_; _sptep_ = rmap_get_next(_iter_))
+
+#define for_each_rmap_spte(_rmap_head_, _iter_, _sptep_) \
+ __for_each_rmap_spte(_rmap_head_, _iter_, _sptep_) \
+ if (!WARN_ON_ONCE(!is_shadow_present_pte(*(_sptep_)))) \
+
+#define for_each_rmap_spte_lockless(_rmap_head_, _iter_, _sptep_, _spte_) \
+ __for_each_rmap_spte(_rmap_head_, _iter_, _sptep_) \
+ if (is_shadow_present_pte(_spte_ = mmu_spte_get_lockless(_sptep_)))
static void drop_spte(struct kvm *kvm, u64 *sptep)
{
@@ -1396,11 +1403,12 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct rmap_iterator iter;
bool flush = false;
- for_each_rmap_spte(rmap_head, &iter, sptep)
+ for_each_rmap_spte(rmap_head, &iter, sptep) {
if (spte_ad_need_write_protect(*sptep))
flush |= spte_wrprot_for_clear_dirty(sptep);
else
flush |= spte_clear_dirty(sptep);
+ }
return flush;
}
@@ -1710,7 +1718,7 @@ static void __rmap_add(struct kvm *kvm,
kvm_update_page_stats(kvm, sp->role.level, 1);
rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
- rmap_count = pte_list_add(cache, spte, rmap_head);
+ rmap_count = pte_list_add(kvm, cache, spte, rmap_head);
if (rmap_count > kvm->stat.max_mmu_rmap_size)
kvm->stat.max_mmu_rmap_size = rmap_count;
@@ -1859,13 +1867,14 @@ static unsigned kvm_page_table_hashfn(gfn_t gfn)
return hash_64(gfn, KVM_MMU_HASH_SHIFT);
}
-static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
+static void mmu_page_add_parent_pte(struct kvm *kvm,
+ struct kvm_mmu_memory_cache *cache,
struct kvm_mmu_page *sp, u64 *parent_pte)
{
if (!parent_pte)
return;
- pte_list_add(cache, parent_pte, &sp->parent_ptes);
+ pte_list_add(kvm, cache, parent_pte, &sp->parent_ptes);
}
static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
@@ -2555,7 +2564,7 @@ static void __link_shadow_page(struct kvm *kvm,
mmu_spte_set(sptep, spte);
- mmu_page_add_parent_pte(cache, sp, sptep);
+ mmu_page_add_parent_pte(kvm, cache, sp, sptep);
/*
* The non-direct sub-pagetable must be updated before linking. For
--
2.46.0.792.g87dc391469-goog