From: JP Kobryn <inwardvessel@gmail.com>
To: shakeel.butt@linux.dev, tj@kernel.org, mhocko@kernel.org,
hannes@cmpxchg.org, yosryahmed@google.com,
akpm@linux-foundation.org
Cc: linux-mm@kvack.org, cgroups@vger.kernel.org, kernel-team@meta.com
Subject: [PATCH 07/11] cgroup: fetch cpu-specific lock in rstat cpu lock helpers
Date: Mon, 17 Feb 2025 19:14:44 -0800 [thread overview]
Message-ID: <20250218031448.46951-8-inwardvessel@gmail.com> (raw)
In-Reply-To: <20250218031448.46951-1-inwardvessel@gmail.com>
The lock/unlock helper functions for per-cpu locks accept a cpu
argument. This makes them appear as if the cpu will be used as the
offset from the base per-cpu pointer. But in fact, the cpu is only
used as a tracepoint argument. Change the functions so that the cpu is
also used for looking up the lock specific to that cpu. This
means the call sites no longer have to perform the per-cpu offset
lookup prior to calling these functions. Note that this follows suit
with other functions in the rstat source - functions that accept a cpu
argument perform the per-cpu pointer lookup internally, as opposed to
having callers look it up in advance.
Signed-off-by: JP Kobryn <inwardvessel@gmail.com>
---
kernel/cgroup/rstat.c | 37 +++++++++++++++++++++----------------
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 26c75629bca2..4cb0f3ffc1db 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -115,7 +115,12 @@ static struct cgroup_rstat_ops rstat_bpf_ops = {
#endif /* CONFIG_CGROUP_BPF */
/*
- * Helper functions for rstat per CPU lock (cgroup_rstat_cpu_lock).
+ * Helper functions for rstat per-cpu locks.
+ * @lock: pointer to per-cpu lock variable
+ * @cpu: the cpu to use for getting the cpu-specific lock
+ * @cgrp: the associated cgroup
+ * @fast_path: whether this function is called while updating
+ * in the fast path or flushing in the NON-fast path
*
* This makes it easier to diagnose locking issues and contention in
* production environments. The parameter @fast_path determine the
@@ -123,19 +128,20 @@ static struct cgroup_rstat_ops rstat_bpf_ops = {
* operations without handling high-frequency fast-path "update" events.
*/
static __always_inline
-unsigned long _cgroup_rstat_cpu_lock(raw_spinlock_t *cpu_lock, int cpu,
+unsigned long _cgroup_rstat_cpu_lock(raw_spinlock_t *lock, int cpu,
struct cgroup *cgrp, const bool fast_path)
{
+ raw_spinlock_t *cpu_lock = per_cpu_ptr(lock, cpu);
unsigned long flags;
bool contended;
/*
- * The _irqsave() is needed because cgroup_rstat_lock is
- * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
- * this lock with the _irq() suffix only disables interrupts on
- * a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
- * interrupts on both configurations. The _irqsave() ensures
- * that interrupts are always disabled and later restored.
+ * The _irqsave() is needed because the locks used for flushing
+ * are spinlock_t which is a sleeping lock on PREEMPT_RT.
+ * Acquiring this lock with the _irq() suffix only disables
+ * interrupts on a non-PREEMPT_RT kernel. The raw_spinlock_t below
+ * disables interrupts on both configurations. The _irqsave()
+ * ensures that interrupts are always disabled and later restored.
*/
contended = !raw_spin_trylock_irqsave(cpu_lock, flags);
if (contended) {
@@ -156,10 +162,12 @@ unsigned long _cgroup_rstat_cpu_lock(raw_spinlock_t *cpu_lock, int cpu,
}
static __always_inline
-void _cgroup_rstat_cpu_unlock(raw_spinlock_t *cpu_lock, int cpu,
+void _cgroup_rstat_cpu_unlock(raw_spinlock_t *lock, int cpu,
struct cgroup *cgrp, unsigned long flags,
const bool fast_path)
{
+ raw_spinlock_t *cpu_lock = per_cpu_ptr(lock, cpu);
+
if (fast_path)
trace_cgroup_rstat_cpu_unlock_fastpath(cgrp, cpu, false);
else
@@ -172,8 +180,6 @@ static void __cgroup_rstat_updated(struct cgroup_rstat *rstat, int cpu,
struct cgroup_rstat_ops *ops)
{
struct cgroup *cgrp;
-
- raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
unsigned long flags;
/*
@@ -188,7 +194,7 @@ static void __cgroup_rstat_updated(struct cgroup_rstat *rstat, int cpu,
return;
cgrp = ops->cgroup_fn(rstat);
- flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, cgrp, true);
+ flags = _cgroup_rstat_cpu_lock(&cgroup_rstat_cpu_lock, cpu, cgrp, true);
/* put @rstat and all ancestors on the corresponding updated lists */
while (true) {
@@ -216,7 +222,7 @@ static void __cgroup_rstat_updated(struct cgroup_rstat *rstat, int cpu,
rstat = parent;
}
- _cgroup_rstat_cpu_unlock(cpu_lock, cpu, cgrp, flags, true);
+ _cgroup_rstat_cpu_unlock(&cgroup_rstat_cpu_lock, cpu, cgrp, flags, true);
}
/**
@@ -315,14 +321,13 @@ static struct cgroup_rstat *cgroup_rstat_push_children(
static struct cgroup_rstat *cgroup_rstat_updated_list(
struct cgroup_rstat *root, int cpu, struct cgroup_rstat_ops *ops)
{
- raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
struct cgroup_rstat_cpu *rstatc = rstat_cpu(root, cpu);
struct cgroup_rstat *head = NULL, *parent, *child;
struct cgroup *cgrp;
unsigned long flags;
cgrp = ops->cgroup_fn(root);
- flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, cgrp, false);
+ flags = _cgroup_rstat_cpu_lock(&cgroup_rstat_cpu_lock, cpu, cgrp, false);
/* Return NULL if this subtree is not on-list */
if (!rstatc->updated_next)
@@ -359,7 +364,7 @@ static struct cgroup_rstat *cgroup_rstat_updated_list(
if (child != root)
head = cgroup_rstat_push_children(head, child, cpu, ops);
unlock_ret:
- _cgroup_rstat_cpu_unlock(cpu_lock, cpu, cgrp, flags, false);
+ _cgroup_rstat_cpu_unlock(&cgroup_rstat_cpu_lock, cpu, cgrp, flags, false);
return head;
}
--
2.48.1
next prev parent reply other threads:[~2025-02-18 3:15 UTC|newest]
Thread overview: 42+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-18 3:14 [PATCH 00/11] cgroup: separate rstat trees JP Kobryn
2025-02-18 3:14 ` [PATCH 01/11] cgroup: move rstat pointers into struct of their own JP Kobryn
2025-02-19 1:05 ` Shakeel Butt
2025-02-19 1:23 ` Shakeel Butt
2025-02-20 16:53 ` Yosry Ahmed
2025-02-24 17:06 ` JP Kobryn
2025-02-24 18:36 ` Yosry Ahmed
2025-02-18 3:14 ` [PATCH 02/11] cgroup: add level of indirection for cgroup_rstat struct JP Kobryn
2025-02-19 2:26 ` Shakeel Butt
2025-02-20 17:08 ` Yosry Ahmed
2025-02-19 5:57 ` kernel test robot
2025-02-18 3:14 ` [PATCH 03/11] cgroup: move cgroup_rstat from cgroup to cgroup_subsys_state JP Kobryn
2025-02-20 17:06 ` Shakeel Butt
2025-02-20 17:22 ` Yosry Ahmed
2025-02-25 19:20 ` JP Kobryn
2025-02-18 3:14 ` [PATCH 04/11] cgroup: introduce cgroup_rstat_ops JP Kobryn
2025-02-19 7:21 ` kernel test robot
2025-02-20 17:50 ` Shakeel Butt
2025-02-18 3:14 ` [PATCH 05/11] cgroup: separate rstat for bpf cgroups JP Kobryn
2025-02-21 18:14 ` Shakeel Butt
2025-02-18 3:14 ` [PATCH 06/11] cgroup: rstat lock indirection JP Kobryn
2025-02-21 22:09 ` Shakeel Butt
2025-02-18 3:14 ` JP Kobryn [this message]
2025-02-21 22:35 ` [PATCH 07/11] cgroup: fetch cpu-specific lock in rstat cpu lock helpers Shakeel Butt
2025-02-18 3:14 ` [PATCH 08/11] cgroup: rstat cpu lock indirection JP Kobryn
2025-02-19 8:48 ` kernel test robot
2025-02-22 0:18 ` Shakeel Butt
2025-02-18 3:14 ` [PATCH 09/11] cgroup: separate rstat locks for bpf cgroups JP Kobryn
2025-02-18 3:14 ` [PATCH 10/11] cgroup: separate rstat locks for subsystems JP Kobryn
2025-02-22 0:23 ` Shakeel Butt
2025-02-18 3:14 ` [PATCH 11/11] cgroup: separate rstat list pointers from base stats JP Kobryn
2025-02-22 0:28 ` Shakeel Butt
2025-02-20 15:51 ` [PATCH 00/11] cgroup: separate rstat trees Tejun Heo
2025-02-27 23:44 ` JP Kobryn
2025-02-20 17:26 ` Yosry Ahmed
2025-02-20 17:53 ` Shakeel Butt
2025-02-20 17:59 ` Yosry Ahmed
2025-02-20 18:14 ` JP Kobryn
2025-02-20 20:04 ` Yosry Ahmed
2025-02-20 20:22 ` Yosry Ahmed
2025-02-24 21:13 ` Shakeel Butt
2025-02-24 21:54 ` Yosry Ahmed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250218031448.46951-8-inwardvessel@gmail.com \
--to=inwardvessel@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=cgroups@vger.kernel.org \
--cc=hannes@cmpxchg.org \
--cc=kernel-team@meta.com \
--cc=linux-mm@kvack.org \
--cc=mhocko@kernel.org \
--cc=shakeel.butt@linux.dev \
--cc=tj@kernel.org \
--cc=yosryahmed@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox