From: Vlastimil Babka <vbabka@suse.cz>
To: Peter Zijlstra <peterz@infradead.org>, torvalds@linux-foundation.org
Cc: corbet@lwn.net, will@kernel.org, boqun.feng@gmail.com,
mark.rutland@arm.com, catalin.marinas@arm.com, dennis@kernel.org,
tj@kernel.org, cl@linux.com, hca@linux.ibm.com,
gor@linux.ibm.com, agordeev@linux.ibm.com,
borntraeger@linux.ibm.com, svens@linux.ibm.com,
tglx@linutronix.de, mingo@redhat.com, bp@alien8.de,
dave.hansen@linux.intel.com, x86@kernel.org, hpa@zytor.com,
joro@8bytes.org, suravee.suthikulpanit@amd.com,
robin.murphy@arm.com, dwmw2@infradead.org,
baolu.lu@linux.intel.com, Arnd Bergmann <arnd@arndb.de>,
Herbert Xu <herbert@gondor.apana.org.au>,
davem@davemloft.net, penberg@kernel.org, rientjes@google.com,
iamjoonsoo.kim@lge.com, Andrew Morton <akpm@linux-foundation.org>,
roman.gushchin@linux.dev, 42.hyeyoo@gmail.com,
linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-mm@kvack.org, linux-s390@vger.kernel.org,
iommu@lists.linux.dev, linux-arch@vger.kernel.org,
linux-crypto@vger.kernel.org
Subject: Re: [PATCH v3 09/11] mm/slub: Fold slab_update_freelist()
Date: Wed, 24 May 2023 13:58:24 +0200 [thread overview]
Message-ID: <18c33bf0-0c7e-7584-5149-33cf77b50b8a@suse.cz> (raw)
In-Reply-To: <20230515080554.520976397@infradead.org>
On 5/15/23 09:57, Peter Zijlstra wrote:
> The two functions slab_update_freelist() and __slab_update_freelist()
> are nearly identical, fold and add a boolean argument and rely on
> constant propagation.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Something like that has been tried before and the result:
https://lore.kernel.org/all/CAHk-=wiJLqL2cUhJbvpyPQpkbVOu1rVSzgO2=S2jC55hneLtfQ@mail.gmail.com/
Your parameter is not called 'locked' but 'irq_save', which is better, but
that's just one detail.
After your refactoring in 08/11 which puts most of the code into
__update_freelist_fast() and _slow() I'd say the result is not so bad already.
BTW I have some suspicion that some SLUB code is based on assumptions that
are no longer true these days. IIRC I saw some microbenchmark results a
while ago that showed that disabling/enabling irqs is surprisingly (to me)
very cheap today, so maybe it's not so useful to keep doing the
this_cpu_cmpxchg128 for the struct kmem_cache_cpu operations (less so for
struct slab cmpxchg128 where actually different cpus may be involved). But
it needs a closer look.
> ---
> mm/slub.c | 80 +++++++++++++++++++++-----------------------------------------
> 1 file changed, 28 insertions(+), 52 deletions(-)
>
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -559,53 +559,29 @@ __update_freelist_slow(struct slab *slab
> * allocation/ free operation in hardirq context. Therefore nothing can
> * interrupt the operation.
> */
> -static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
> - void *freelist_old, unsigned long counters_old,
> - void *freelist_new, unsigned long counters_new,
> - const char *n)
> +static __always_inline
> +bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
> + void *freelist_old, unsigned long counters_old,
> + void *freelist_new, unsigned long counters_new,
> + bool irq_save, const char *n)
> {
> bool ret;
>
> - if (USE_LOCKLESS_FAST_PATH())
> + if (!irq_save && USE_LOCKLESS_FAST_PATH())
> lockdep_assert_irqs_disabled();
>
> if (s->flags & __CMPXCHG_DOUBLE) {
> ret = __update_freelist_fast(slab, freelist_old, counters_old,
> freelist_new, counters_new);
> } else {
> - ret = __update_freelist_slow(slab, freelist_old, counters_old,
> - freelist_new, counters_new);
> - }
> - if (likely(ret))
> - return true;
> -
> - cpu_relax();
> - stat(s, CMPXCHG_DOUBLE_FAIL);
> -
> -#ifdef SLUB_DEBUG_CMPXCHG
> - pr_info("%s %s: cmpxchg double redo ", n, s->name);
> -#endif
> -
> - return false;
> -}
> -
> -static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
> - void *freelist_old, unsigned long counters_old,
> - void *freelist_new, unsigned long counters_new,
> - const char *n)
> -{
> - bool ret;
> -
> - if (s->flags & __CMPXCHG_DOUBLE) {
> - ret = __update_freelist_fast(slab, freelist_old, counters_old,
> - freelist_new, counters_new);
> - } else {
> unsigned long flags;
>
> - local_irq_save(flags);
> + if (irq_save)
> + local_irq_save(flags);
> ret = __update_freelist_slow(slab, freelist_old, counters_old,
> freelist_new, counters_new);
> - local_irq_restore(flags);
> + if (irq_save)
> + local_irq_restore(flags);
> }
> if (likely(ret))
> return true;
> @@ -2250,10 +2226,10 @@ static inline void *acquire_slab(struct
> VM_BUG_ON(new.frozen);
> new.frozen = 1;
>
> - if (!__slab_update_freelist(s, slab,
> - freelist, counters,
> - new.freelist, new.counters,
> - "acquire_slab"))
> + if (!slab_update_freelist(s, slab,
> + freelist, counters,
> + new.freelist, new.counters,
> + false, "acquire_slab"))
> return NULL;
>
> remove_partial(n, slab);
> @@ -2577,9 +2553,9 @@ static void deactivate_slab(struct kmem_
>
>
> if (!slab_update_freelist(s, slab,
> - old.freelist, old.counters,
> - new.freelist, new.counters,
> - "unfreezing slab")) {
> + old.freelist, old.counters,
> + new.freelist, new.counters,
> + true, "unfreezing slab")) {
> if (mode == M_PARTIAL)
> spin_unlock_irqrestore(&n->list_lock, flags);
> goto redo;
> @@ -2633,10 +2609,10 @@ static void __unfreeze_partials(struct k
>
> new.frozen = 0;
>
> - } while (!__slab_update_freelist(s, slab,
> - old.freelist, old.counters,
> - new.freelist, new.counters,
> - "unfreezing slab"));
> + } while (!slab_update_freelist(s, slab,
> + old.freelist, old.counters,
> + new.freelist, new.counters,
> + false, "unfreezing slab"));
>
> if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
> slab->next = slab_to_discard;
> @@ -3072,10 +3048,10 @@ static inline void *get_freelist(struct
> new.inuse = slab->objects;
> new.frozen = freelist != NULL;
>
> - } while (!__slab_update_freelist(s, slab,
> - freelist, counters,
> - NULL, new.counters,
> - "get_freelist"));
> + } while (!slab_update_freelist(s, slab,
> + freelist, counters,
> + NULL, new.counters,
> + false, "get_freelist"));
>
> return freelist;
> }
> @@ -3666,9 +3642,9 @@ static void __slab_free(struct kmem_cach
> }
>
> } while (!slab_update_freelist(s, slab,
> - prior, counters,
> - head, new.counters,
> - "__slab_free"));
> + prior, counters,
> + head, new.counters,
> + true, "__slab_free"));
>
> if (likely(!n)) {
>
>
>
next prev parent reply other threads:[~2023-05-24 11:58 UTC|newest]
Thread overview: 25+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-05-15 7:56 [PATCH v3 00/11] Introduce cmpxchg128() -- aka. the demise of cmpxchg_double() Peter Zijlstra
2023-05-15 7:57 ` [PATCH v3 01/11] cyrpto/b128ops: Remove struct u128 Peter Zijlstra
2023-05-15 7:57 ` [PATCH v3 02/11] types: Introduce [us]128 Peter Zijlstra
2023-05-15 7:57 ` [PATCH v3 03/11] arch: Introduce arch_{,try_}_cmpxchg128{,_local}() Peter Zijlstra
2023-05-15 7:57 ` [PATCH v3 04/11] instrumentation: Wire up cmpxchg128() Peter Zijlstra
2023-05-15 7:57 ` [PATCH v3 05/11] percpu: Wire up cmpxchg128 Peter Zijlstra
2023-05-25 12:49 ` Peter Zijlstra
2023-05-25 22:59 ` Petr Tesařík
2023-05-15 7:57 ` [PATCH v3 06/11] x86,amd_iommu: Replace cmpxchg_double() Peter Zijlstra
2023-05-15 7:57 ` [PATCH v3 07/11] x86,intel_iommu: " Peter Zijlstra
2023-05-15 7:57 ` [PATCH v3 08/11] slub: " Peter Zijlstra
2023-05-24 9:32 ` Peter Zijlstra
2023-05-24 10:13 ` Vlastimil Babka
2023-05-25 10:29 ` Peter Zijlstra
2023-05-25 10:52 ` Arnd Bergmann
2023-05-25 13:10 ` Peter Zijlstra
2023-05-30 14:22 ` Peter Zijlstra
2023-05-30 19:32 ` Peter Zijlstra
2023-05-15 7:57 ` [PATCH v3 09/11] mm/slub: Fold slab_update_freelist() Peter Zijlstra
2023-05-24 11:58 ` Vlastimil Babka [this message]
2023-05-15 7:57 ` [PATCH v3 10/11] arch: Remove cmpxchg_double Peter Zijlstra
2023-05-15 8:52 ` Heiko Carstens
2023-05-15 7:57 ` [PATCH v3 11/11] s390/cpum_sf: Convert to cmpxchg128() Peter Zijlstra
2023-05-15 9:42 ` [PATCH v3 00/11] Introduce cmpxchg128() -- aka. the demise of cmpxchg_double() Arnd Bergmann
2023-05-24 9:39 ` Peter Zijlstra
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=18c33bf0-0c7e-7584-5149-33cf77b50b8a@suse.cz \
--to=vbabka@suse.cz \
--cc=42.hyeyoo@gmail.com \
--cc=agordeev@linux.ibm.com \
--cc=akpm@linux-foundation.org \
--cc=arnd@arndb.de \
--cc=baolu.lu@linux.intel.com \
--cc=boqun.feng@gmail.com \
--cc=borntraeger@linux.ibm.com \
--cc=bp@alien8.de \
--cc=catalin.marinas@arm.com \
--cc=cl@linux.com \
--cc=corbet@lwn.net \
--cc=dave.hansen@linux.intel.com \
--cc=davem@davemloft.net \
--cc=dennis@kernel.org \
--cc=dwmw2@infradead.org \
--cc=gor@linux.ibm.com \
--cc=hca@linux.ibm.com \
--cc=herbert@gondor.apana.org.au \
--cc=hpa@zytor.com \
--cc=iamjoonsoo.kim@lge.com \
--cc=iommu@lists.linux.dev \
--cc=joro@8bytes.org \
--cc=linux-arch@vger.kernel.org \
--cc=linux-crypto@vger.kernel.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-s390@vger.kernel.org \
--cc=mark.rutland@arm.com \
--cc=mingo@redhat.com \
--cc=penberg@kernel.org \
--cc=peterz@infradead.org \
--cc=rientjes@google.com \
--cc=robin.murphy@arm.com \
--cc=roman.gushchin@linux.dev \
--cc=suravee.suthikulpanit@amd.com \
--cc=svens@linux.ibm.com \
--cc=tglx@linutronix.de \
--cc=tj@kernel.org \
--cc=torvalds@linux-foundation.org \
--cc=will@kernel.org \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox