linux-mm.kvack.org archive mirror
From: Waiman Long <llong@redhat.com>
To: Frederic Weisbecker <frederic@kernel.org>,
	LKML <linux-kernel@vger.kernel.org>
Cc: "Michal Koutný" <mkoutny@suse.com>,
	"Andrew Morton" <akpm@linux-foundation.org>,
	"Bjorn Helgaas" <bhelgaas@google.com>,
	"Catalin Marinas" <catalin.marinas@arm.com>,
	"Chen Ridong" <chenridong@huawei.com>,
	"Danilo Krummrich" <dakr@kernel.org>,
	"David S . Miller" <davem@davemloft.net>,
	"Eric Dumazet" <edumazet@google.com>,
	"Gabriele Monaco" <gmonaco@redhat.com>,
	"Greg Kroah-Hartman" <gregkh@linuxfoundation.org>,
	"Ingo Molnar" <mingo@redhat.com>,
	"Jakub Kicinski" <kuba@kernel.org>,
	"Jens Axboe" <axboe@kernel.dk>,
	"Johannes Weiner" <hannes@cmpxchg.org>,
	"Lai Jiangshan" <jiangshanlai@gmail.com>,
	"Marco Crivellari" <marco.crivellari@suse.com>,
	"Michal Hocko" <mhocko@suse.com>,
	"Muchun Song" <muchun.song@linux.dev>,
	"Paolo Abeni" <pabeni@redhat.com>,
	"Peter Zijlstra" <peterz@infradead.org>,
	"Phil Auld" <pauld@redhat.com>,
	"Rafael J . Wysocki" <rafael@kernel.org>,
	"Roman Gushchin" <roman.gushchin@linux.dev>,
	"Shakeel Butt" <shakeel.butt@linux.dev>,
	"Simon Horman" <horms@kernel.org>, "Tejun Heo" <tj@kernel.org>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"Vlastimil Babka" <vbabka@suse.cz>,
	"Will Deacon" <will@kernel.org>,
	cgroups@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
	linux-block@vger.kernel.org, linux-mm@kvack.org,
	linux-pci@vger.kernel.org, netdev@vger.kernel.org
Subject: Re: [PATCH 25/33] kthread: Include unbound kthreads in the managed affinity list
Date: Fri, 26 Dec 2025 17:11:26 -0500	[thread overview]
Message-ID: <1e530c72-75d7-4c7e-96e7-329056d6baf5@redhat.com> (raw)
In-Reply-To: <20251224134520.33231-26-frederic@kernel.org>

On 12/24/25 8:45 AM, Frederic Weisbecker wrote:
> The managed affinity list currently contains only unbound kthreads that
> have affinity preferences. Unbound kthreads that are globally affine by
> default are not on the list because their affinity is automatically
> managed by the scheduler (through the fallback housekeeping mask) and by
> cpuset.
>
> However, in order to preserve the preferred affinity of kthreads, cpuset
> will delegate the propagation of isolated partition updates to the
> housekeeping and kthread code.
>
> Prepare for that by including all unbound kthreads in the managed
> affinity list.
>
> Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
> ---
>   kernel/kthread.c | 70 ++++++++++++++++++++++++++++--------------------
>   1 file changed, 41 insertions(+), 29 deletions(-)
>
> diff --git a/kernel/kthread.c b/kernel/kthread.c
> index f1e4f1f35cae..51c0908d3d02 100644
> --- a/kernel/kthread.c
> +++ b/kernel/kthread.c
> @@ -365,9 +365,10 @@ static void kthread_fetch_affinity(struct kthread *kthread, struct cpumask *cpum
>   	if (kthread->preferred_affinity) {
>   		pref = kthread->preferred_affinity;
>   	} else {
> -		if (WARN_ON_ONCE(kthread->node == NUMA_NO_NODE))
> -			return;
> -		pref = cpumask_of_node(kthread->node);
> +		if (kthread->node == NUMA_NO_NODE)
> +			pref = housekeeping_cpumask(HK_TYPE_KTHREAD);
> +		else
> +			pref = cpumask_of_node(kthread->node);
>   	}
>   
>   	cpumask_and(cpumask, pref, housekeeping_cpumask(HK_TYPE_KTHREAD));
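
For reference, the resulting preference selection reads roughly as below.
This is a paraphrased sketch of the hunk above, not a literal copy of the
tree (the declaration of "pref" and the remainder of the function are
assumed unchanged by this hunk):

	static void kthread_fetch_affinity(struct kthread *kthread,
					   struct cpumask *cpumask)
	{
		const struct cpumask *pref;

		if (kthread->preferred_affinity) {
			pref = kthread->preferred_affinity;
		} else if (kthread->node == NUMA_NO_NODE) {
			/* New: globally affine kthreads fall back to the housekeeping mask */
			pref = housekeeping_cpumask(HK_TYPE_KTHREAD);
		} else {
			pref = cpumask_of_node(kthread->node);
		}

		/* The chosen preference is still clamped to housekeeping CPUs */
		cpumask_and(cpumask, pref, housekeeping_cpumask(HK_TYPE_KTHREAD));
		/* ... rest of the function unchanged by this hunk ... */
	}
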
> @@ -380,32 +381,29 @@ static void kthread_affine_node(void)
>   	struct kthread *kthread = to_kthread(current);
>   	cpumask_var_t affinity;
>   
> -	WARN_ON_ONCE(kthread_is_per_cpu(current));
> +	if (WARN_ON_ONCE(kthread_is_per_cpu(current)))
> +		return;
>   
> -	if (kthread->node == NUMA_NO_NODE) {
> -		housekeeping_affine(current, HK_TYPE_KTHREAD);
> -	} else {
> -		if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) {
> -			WARN_ON_ONCE(1);
> -			return;
> -		}
> -
> -		mutex_lock(&kthread_affinity_lock);
> -		WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
> -		list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
> -		/*
> -		 * The node cpumask is racy when read from kthread() but:
> -		 * - a racing CPU going down will either fail on the subsequent
> -		 *   call to set_cpus_allowed_ptr() or be migrated to housekeepers
> -		 *   afterwards by the scheduler.
> -		 * - a racing CPU going up will be handled by kthreads_online_cpu()
> -		 */
> -		kthread_fetch_affinity(kthread, affinity);
> -		set_cpus_allowed_ptr(current, affinity);
> -		mutex_unlock(&kthread_affinity_lock);
> -
> -		free_cpumask_var(affinity);
> +	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) {
> +		WARN_ON_ONCE(1);
> +		return;
>   	}
> +
> +	mutex_lock(&kthread_affinity_lock);
> +	WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
> +	list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
> +	/*
> +	 * The node cpumask is racy when read from kthread() but:
> +	 * - a racing CPU going down will either fail on the subsequent
> +	 *   call to set_cpus_allowed_ptr() or be migrated to housekeepers
> +	 *   afterwards by the scheduler.
> +	 * - a racing CPU going up will be handled by kthreads_online_cpu()
> +	 */
> +	kthread_fetch_affinity(kthread, affinity);
> +	set_cpus_allowed_ptr(current, affinity);
> +	mutex_unlock(&kthread_affinity_lock);
> +
> +	free_cpumask_var(affinity);
>   }
>   
>   static int kthread(void *_create)
> @@ -919,8 +917,22 @@ static int kthreads_online_cpu(unsigned int cpu)
>   			ret = -EINVAL;
>   			continue;
>   		}
> -		kthread_fetch_affinity(k, affinity);
> -		set_cpus_allowed_ptr(k->task, affinity);
> +
> +		/*
> +		 * Unbound kthreads without preferred affinity are already affine
> +		 * to housekeeping, whether those CPUs are online or not. So no need
> +		 * to handle newly online CPUs for them.
> +		 *
> +		 * But kthreads with a preferred affinity or node are different:
> +		 * if none of their preferred CPUs are online and part of
> +		 * housekeeping at the same time, they must be affine to housekeeping.
> +		 * But as soon as one of their preferred CPUs becomes online, they must
> +		 * be affine to it.
> +		 */
> +		if (k->preferred_affinity || k->node != NUMA_NO_NODE) {
> +			kthread_fetch_affinity(k, affinity);
> +			set_cpus_allowed_ptr(k->task, affinity);
> +		}
>   	}
>   
>   	free_cpumask_var(affinity);
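
To make the comment above concrete, here is the same check from the last
hunk annotated with a hypothetical scenario (the CPU number is made up for
illustration):

	/* Inside the loop over the managed affinity list in kthreads_online_cpu() */
	if (k->preferred_affinity || k->node != NUMA_NO_NODE) {
		/*
		 * Example: a kthread that called kthread_affine_preferred()
		 * for CPU 5 while CPU 5 was offline has been running on the
		 * housekeeping CPUs; once CPU 5 comes online this re-runs
		 * kthread_fetch_affinity() and moves it back to CPU 5.
		 */
		kthread_fetch_affinity(k, affinity);
		set_cpus_allowed_ptr(k->task, affinity);
	}
	/*
	 * Kthreads with no preference and no node are skipped: their mask is
	 * already the full housekeeping mask, whether those CPUs are online
	 * or not.
	 */
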
Reviewed-by: Waiman Long <longman@redhat.com>



Thread overview: 68+ messages
2025-12-24 13:44 [PATCH 00/33 v5] cpuset/isolation: Honour kthreads preferred affinity Frederic Weisbecker
2025-12-24 13:44 ` [PATCH 01/33] PCI: Prepare to protect against concurrent isolated cpuset change Frederic Weisbecker
2025-12-29  3:23   ` Zhang Qiao
2025-12-29  3:53     ` Waiman Long
2025-12-31 13:18       ` Frederic Weisbecker
2025-12-30 22:38     ` Frederic Weisbecker
2025-12-24 13:44 ` [PATCH 02/33] cpu: Revert "cpu/hotplug: Prevent self deadlock on CPU hot-unplug" Frederic Weisbecker
2025-12-24 13:44 ` [PATCH 03/33] memcg: Prepare to protect against concurrent isolated cpuset change Frederic Weisbecker
2025-12-26 23:56   ` Tejun Heo
2025-12-24 13:44 ` [PATCH 04/33] mm: vmstat: " Frederic Weisbecker
2025-12-24 13:44 ` [PATCH 05/33] sched/isolation: Save boot defined domain flags Frederic Weisbecker
2025-12-25 22:27   ` Waiman Long
2025-12-31 13:45     ` Frederic Weisbecker
2025-12-24 13:44 ` [PATCH 06/33] cpuset: Convert boot_hk_cpus to use HK_TYPE_DOMAIN_BOOT Frederic Weisbecker
2025-12-25 22:31   ` Waiman Long
2025-12-24 13:44 ` [PATCH 07/33] driver core: cpu: Convert /sys/devices/system/cpu/isolated " Frederic Weisbecker
2025-12-24 13:44 ` [PATCH 08/33] net: Keep ignoring isolated cpuset change Frederic Weisbecker
2025-12-24 13:44 ` [PATCH 09/33] block: Protect against concurrent " Frederic Weisbecker
2025-12-30  0:37   ` Jens Axboe
2025-12-31 14:02     ` Frederic Weisbecker
2025-12-31 15:30       ` Jens Axboe
2025-12-24 13:44 ` [PATCH 10/33] timers/migration: Prevent from lockdep false positive warning Frederic Weisbecker
2025-12-24 13:44 ` [PATCH 11/33] cpu: Provide lockdep check for CPU hotplug lock write-held Frederic Weisbecker
2025-12-24 13:44 ` [PATCH 12/33] cpuset: Provide lockdep check for cpuset lock held Frederic Weisbecker
2025-12-24 13:45 ` [PATCH 13/33] sched/isolation: Convert housekeeping cpumasks to rcu pointers Frederic Weisbecker
2025-12-24 13:45 ` [PATCH 14/33] cpuset: Update HK_TYPE_DOMAIN cpumask from cpuset Frederic Weisbecker
2025-12-26  2:24   ` Waiman Long
2025-12-26  3:20     ` Waiman Long
2025-12-26  8:08   ` Chen Ridong
2025-12-31 14:21     ` Frederic Weisbecker
2025-12-24 13:45 ` [PATCH 15/33] sched/isolation: Flush memcg workqueues on cpuset isolated partition change Frederic Weisbecker
2025-12-24 13:45 ` [PATCH 16/33] sched/isolation: Flush vmstat " Frederic Weisbecker
2025-12-24 13:45 ` [PATCH 17/33] PCI: Flush PCI probe workqueue " Frederic Weisbecker
2025-12-26  8:48   ` Chen Ridong
2025-12-31 14:37     ` Frederic Weisbecker
2025-12-24 13:45 ` [PATCH 18/33] cpuset: Propagate cpuset isolation update to workqueue through housekeeping Frederic Weisbecker
2025-12-26 20:31   ` Waiman Long
2025-12-27  0:18   ` Tejun Heo
2025-12-24 13:45 ` [PATCH 19/33] cpuset: Propagate cpuset isolation update to timers " Frederic Weisbecker
2025-12-26 20:40   ` Waiman Long
2025-12-31 15:53     ` Frederic Weisbecker
2025-12-24 13:45 ` [PATCH 20/33] timers/migration: Remove superfluous cpuset isolation test Frederic Weisbecker
2025-12-26 20:45   ` Waiman Long
2025-12-24 13:45 ` [PATCH 21/33] cpuset: Remove cpuset_cpu_is_isolated() Frederic Weisbecker
2025-12-26 20:48   ` Waiman Long
2025-12-24 13:45 ` [PATCH 22/33] sched/isolation: Remove HK_TYPE_TICK test from cpu_is_isolated() Frederic Weisbecker
2025-12-26 21:26   ` Waiman Long
2025-12-24 13:45 ` [PATCH 23/33] PCI: Remove superfluous HK_TYPE_WQ check Frederic Weisbecker
2025-12-24 13:45 ` [PATCH 24/33] kthread: Refine naming of affinity related fields Frederic Weisbecker
2025-12-26 21:37   ` Waiman Long
2025-12-24 13:45 ` [PATCH 25/33] kthread: Include unbound kthreads in the managed affinity list Frederic Weisbecker
2025-12-26 22:11   ` Waiman Long [this message]
2025-12-24 13:45 ` [PATCH 26/33] kthread: Include kthreadd to " Frederic Weisbecker
2025-12-26 22:13   ` Waiman Long
2025-12-24 13:45 ` [PATCH 27/33] kthread: Rely on HK_TYPE_DOMAIN for preferred affinity management Frederic Weisbecker
2025-12-26 22:16   ` Waiman Long
2025-12-24 13:45 ` [PATCH 28/33] sched: Switch the fallback task allowed cpumask to HK_TYPE_DOMAIN Frederic Weisbecker
2025-12-26 23:08   ` Waiman Long
2025-12-24 13:45 ` [PATCH 29/33] sched/arm64: Move fallback task " Frederic Weisbecker
2025-12-26 23:46   ` Waiman Long
2025-12-24 13:45 ` [PATCH 30/33] kthread: Honour kthreads preferred affinity after cpuset changes Frederic Weisbecker
2025-12-26 23:59   ` Waiman Long
2025-12-24 13:45 ` [PATCH 31/33] kthread: Comment on the purpose and placement of kthread_affine_node() call Frederic Weisbecker
2025-12-24 13:45 ` [PATCH 32/33] kthread: Document kthread_affine_preferred() Frederic Weisbecker
2025-12-24 13:45 ` [PATCH 33/33] doc: Add housekeeping documentation Frederic Weisbecker
2025-12-27  0:39   ` Waiman Long
2025-12-31 15:25     ` Frederic Weisbecker
2025-12-31 17:35       ` Waiman Long
