From: Yeoreum Yun <yeoreum.yun@arm.com>
To: Luka <luka.2016.cs@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: Re: [BUG] WARNING: locking bug in ___rmqueue_pcplist in Linux kernel v6.12
Date: Thu, 5 Jun 2025 06:13:16 +0100
Message-ID: <aEEnbJKLdwOZm3UC@e129823.arm.com>
In-Reply-To: <aEAC4/XzOqkqA1wd@e129823.arm.com>
> Hi Luka,
>
> >
> > I am writing to report a potential vulnerability identified in the
> > upstream Linux Kernel version v6.12, corresponding to the following
> > commit in the mainline repository:
> >
> > Git Commit: adc218676eef25575469234709c2d87185ca223a (tag: v6.12)
> >
> > This issue was discovered during the testing of the Android 16 AOSP
> > kernel, which is based on Linux kernel version 6.12, specifically from
> > the AOSP kernel branch:
> >
> > AOSP kernel branch: android16-6.12
> > Manifest path: kernel/common.git
> > Source URL: https://android.googlesource.com/kernel/common/+/refs/heads/android16-6.12
> >
> > Although this kernel branch is used in Android 16 development, its
> > base is aligned with the upstream Linux v6.12 release. I observed this
> > issue while conducting stability and fuzzing tests on the Android 16
> > platform and identified that the root cause lies in the upstream
> > codebase.
> >
> >
> > Bug Location: ___rmqueue_pcplist+0x3b0/0x236c mm/page_alloc.c:3276
> >
> > Bug Report: https://hastebin.com/share/tobupusuya.bash
> >
> > Entire Log: https://hastebin.com/share/imecipavet.yaml
> >
> >
> > Thank you very much for your time and attention. I sincerely apologize
> > that I am currently unable to provide a reproducer for this issue.
> > However, I am actively working on reproducing the problem and will
> > share any findings or reproduction steps with you as soon as they
> > are available.
> >
> > I greatly appreciate your efforts in maintaining the Linux kernel and
> > your attention to this matter.
> >
>
> Could you test with this patch, please?
>
> -----8<-----
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index 28066b4ced81..4c7007377525 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -707,7 +707,7 @@ enum zone_watermarks {
> #define PCPF_FREE_HIGH_BATCH BIT(1)
>
> struct per_cpu_pages {
> - spinlock_t lock; /* Protects lists field */
> + raw_spinlock_t lock; /* Protects lists field */
> int count; /* number of pages in the list */
> int high; /* high watermark, emptying needed */
> int high_min; /* min high watermark */
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 2ef3c07266b3..f00d58aba491 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -134,7 +134,7 @@ static DEFINE_MUTEX(pcp_batch_high_lock);
> type *_ret; \
> pcpu_task_pin(); \
> _ret = this_cpu_ptr(ptr); \
> - spin_lock(&_ret->member); \
> + raw_spin_lock(&_ret->member); \
> _ret; \
> })
>
> @@ -143,7 +143,7 @@ static DEFINE_MUTEX(pcp_batch_high_lock);
> type *_ret; \
> pcpu_task_pin(); \
> _ret = this_cpu_ptr(ptr); \
> - if (!spin_trylock(&_ret->member)) { \
> + if (!raw_spin_trylock(&_ret->member)) { \
> pcpu_task_unpin(); \
> _ret = NULL; \
> } \
> @@ -152,7 +152,7 @@ static DEFINE_MUTEX(pcp_batch_high_lock);
>
> #define pcpu_spin_unlock(member, ptr) \
> ({ \
> - spin_unlock(&ptr->member); \
> + raw_spin_unlock(&ptr->member); \
> pcpu_task_unpin(); \
> })
>
> @@ -2393,9 +2393,9 @@ int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
>
> to_drain = pcp->count - pcp->high;
> if (to_drain > 0) {
> - spin_lock(&pcp->lock);
> + raw_spin_lock(&pcp->lock);
> free_pcppages_bulk(zone, to_drain, pcp, 0);
> - spin_unlock(&pcp->lock);
> + raw_spin_unlock(&pcp->lock);
> todo++;
> }
>
> @@ -2415,9 +2415,9 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
> batch = READ_ONCE(pcp->batch);
> to_drain = min(pcp->count, batch);
> if (to_drain > 0) {
> - spin_lock(&pcp->lock);
> + raw_spin_lock(&pcp->lock);
> free_pcppages_bulk(zone, to_drain, pcp, 0);
> - spin_unlock(&pcp->lock);
> + raw_spin_unlock(&pcp->lock);
> }
> }
> #endif
> @@ -2431,7 +2431,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
> int count;
>
> do {
> - spin_lock(&pcp->lock);
> + raw_spin_lock(&pcp->lock);
> count = pcp->count;
> if (count) {
> int to_drain = min(count,
> @@ -2440,7 +2440,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
> free_pcppages_bulk(zone, to_drain, pcp, 0);
> count -= to_drain;
> }
> - spin_unlock(&pcp->lock);
> + raw_spin_unlock(&pcp->lock);
> } while (count);
> }
>
> @@ -5744,7 +5744,7 @@ static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonesta
> memset(pcp, 0, sizeof(*pcp));
> memset(pzstats, 0, sizeof(*pzstats));
>
> - spin_lock_init(&pcp->lock);
> + raw_spin_lock_init(&pcp->lock);
> for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
> INIT_LIST_HEAD(&pcp->lists[pindex]);
>
> @@ -5854,12 +5854,12 @@ static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
> * This can reduce zone lock contention without hurting
> * cache-hot pages sharing.
> */
> - spin_lock(&pcp->lock);
> + raw_spin_lock(&pcp->lock);
> if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch)
> pcp->flags |= PCPF_FREE_HIGH_BATCH;
> else
> pcp->flags &= ~PCPF_FREE_HIGH_BATCH;
> - spin_unlock(&pcp->lock);
> + raw_spin_unlock(&pcp->lock);
> }
>
> void setup_pcp_cacheinfo(unsigned int cpu)
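For context on the quoted patch: it converts pcp->lock from spinlock_t
to raw_spinlock_t. The semantic difference it relies on is shown below
in a minimal sketch with hypothetical lock names (not part of the
patch), assuming a CONFIG_PREEMPT_RT build:

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(sketch_raw_lock); /* spins on every config */
	static DEFINE_SPINLOCK(sketch_lock);         /* rt_mutex on PREEMPT_RT */

	static void sketch_atomic_path(void)
	{
		unsigned long flags;

		/*
		 * A raw_spinlock_t disables preemption and never sleeps,
		 * so it is safe in any atomic context, RT or not.
		 */
		raw_spin_lock_irqsave(&sketch_raw_lock, flags);
		/* ... critical section that must stay atomic ... */
		raw_spin_unlock_irqrestore(&sketch_raw_lock, flags);

		/*
		 * On CONFIG_PREEMPT_RT, spin_lock() may sleep, so taking
		 * it from hard atomic context is a locking bug.
		 */
		spin_lock(&sketch_lock);
		spin_unlock(&sketch_lock);
	}

In short, a raw_spinlock_t stays a spinning lock on every
configuration, which is why it is the usual choice for locks that can
be taken from hard atomic context.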
Sorry, my mistake; I misread the code.
This problem has been fixed since:
commit d40797d6720e8 ("kasan: make kasan_record_aux_stack_noalloc() the default behaviour")
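(My rough understanding of why that commit helps, sketched below with
a hypothetical function name and simplified flow rather than the
actual KASAN internals: recording an aux stack with allocation enabled
can call back into the page allocator via the stack depot, while the
noalloc behaviour cannot.)

	#include <linux/kernel.h>
	#include <linux/stacktrace.h>
	#include <linux/stackdepot.h>
	#include <linux/gfp.h>

	/* Hedged sketch only; not the real kasan_record_aux_stack(). */
	static void record_aux_stack_sketch(bool can_alloc)
	{
		unsigned long entries[16];
		unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

		/*
		 * With STACK_DEPOT_FLAG_CAN_ALLOC set, stack_depot_save_flags()
		 * may allocate a new depot pool, i.e. re-enter the page
		 * allocator. If the caller already sits inside the allocator
		 * holding pcp->lock, lockdep complains; without the flag no
		 * allocation happens, so that recursion cannot occur.
		 */
		stack_depot_save_flags(entries, nr, GFP_NOWAIT,
				       can_alloc ? STACK_DEPOT_FLAG_CAN_ALLOC : 0);
	}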
Thanks
--
Sincerely,
Yeoreum Yun