From: Yu Zhao <yuzhao@google.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	 Marc Zyngier <maz@kernel.org>,
	Muchun Song <muchun.song@linux.dev>,
	 Thomas Gleixner <tglx@linutronix.de>,
	Will Deacon <will@kernel.org>
Cc: Douglas Anderson <dianders@chromium.org>,
	Mark Rutland <mark.rutland@arm.com>,
	 Nanyong Sun <sunnanyong@huawei.com>,
	linux-arm-kernel@lists.infradead.org,
	 linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	Yu Zhao <yuzhao@google.com>
Subject: [PATCH v2 3/6] irqchip/gic-v3: support SGI broadcast
Date: Thu,  7 Nov 2024 13:20:30 -0700
Message-ID: <20241107202033.2721681-4-yuzhao@google.com>
In-Reply-To: <20241107202033.2721681-1-yuzhao@google.com>

GICv3 and later support SGI broadcast, i.e., the routing mode that
delivers an SGI to all PEs in the system except the sending CPU.
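
Conceptually, broadcast delivery is selected by the Interrupt Routing
Mode (IRM) bit of ICC_SGI1R_EL1: IRM=0 targets the PEs named by the
affinity fields and the 16-bit target list, while IRM=1 delivers the
SGI to every PE in the system except the sender. A minimal sketch of
the register write, using the ICC_SGI1R_* definitions the driver
already relies on (this is essentially what gic_broadcast_sgi() in
the hunk below does):

  /* IRM=1: broadcast SGI <irq> to all PEs other than the sender */
  u64 val = BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT) |
            ((u64)irq << ICC_SGI1R_SGI_ID_SHIFT);
  gic_write_sgi1r(val);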

Supporting this mode avoids looping over every remote CPU when
broadcasting SGIs, which matters especially on systems with 200+
CPUs. The performance improvement can be measured with the rest of
this series applied and the kernel booted with
"hugetlb_free_vmemmap=on irqchip.gicv3_pseudo_nmi=1":

  cd /sys/kernel/mm/hugepages/
  echo 600 >hugepages-1048576kB/nr_hugepages
  echo 2048kB >hugepages-1048576kB/demote_size
  perf record -g time echo 600 >hugepages-1048576kB/demote

With 80 CPUs:
           gic_ipi_send_mask()  bash sys time
  Before:  38.14%               0m10.513s
  After:    0.20%               0m5.132s

Signed-off-by: Yu Zhao <yuzhao@google.com>
---
 drivers/irqchip/irq-gic-v3.c | 31 ++++++++++++++++++++++++++++---
 1 file changed, 28 insertions(+), 3 deletions(-)

diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index ce87205e3e82..7ebe870e4608 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1322,6 +1322,7 @@ static void gic_cpu_init(void)
 
 #define MPIDR_TO_SGI_RS(mpidr)	(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
 #define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)
+#define MPIDR_TO_SGI_TARGET_LIST(mpidr)	(1 << ((mpidr) & 0xf))
 
 /*
  * gic_starting_cpu() is called after the last point where cpuhp is allowed
@@ -1356,7 +1357,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 	mpidr = gic_cpu_to_affinity(cpu);
 
 	while (cpu < nr_cpu_ids) {
-		tlist |= 1 << (mpidr & 0xf);
+		tlist |= MPIDR_TO_SGI_TARGET_LIST(mpidr);
 
 		next_cpu = cpumask_next(cpu, mask);
 		if (next_cpu >= nr_cpu_ids)
@@ -1394,9 +1395,20 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
 	gic_write_sgi1r(val);
 }
 
+static void gic_broadcast_sgi(unsigned int irq)
+{
+	u64 val;
+
+	val = BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT) | (irq << ICC_SGI1R_SGI_ID_SHIFT);
+
+	pr_devel("CPU %d: broadcasting SGI %u\n", smp_processor_id(), irq);
+	gic_write_sgi1r(val);
+}
+
 static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
 {
-	int cpu;
+	int cpu = smp_processor_id();
+	bool self = cpumask_test_cpu(cpu, mask);
 
 	if (WARN_ON(d->hwirq >= 16))
 		return;
@@ -1407,6 +1419,19 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
 	 */
 	dsb(ishst);
 
+	if (cpumask_weight(mask) + !self == num_online_cpus()) {
+		/* Broadcast to all but self */
+		gic_broadcast_sgi(d->hwirq);
+		if (self) {
+			unsigned long mpidr = gic_cpu_to_affinity(cpu);
+
+			/* Send to self */
+			gic_send_sgi(MPIDR_TO_SGI_CLUSTER_ID(mpidr),
+				     MPIDR_TO_SGI_TARGET_LIST(mpidr), d->hwirq);
+		}
+		goto done;
+	}
+
 	for_each_cpu(cpu, mask) {
 		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
 		u16 tlist;
@@ -1414,7 +1439,7 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
 		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
 		gic_send_sgi(cluster_id, tlist, d->hwirq);
 	}
-
+done:
 	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
 	isb();
 }
-- 
2.47.0.277.g8800431eea-goog


