linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Dan Carpenter <dan.carpenter@linaro.org>
To: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: linux-mm@kvack.org
Subject: [bug report] mm/mempolicy: skip extra call to __alloc_pages_bulk in weighted interleave
Date: Tue, 1 Jul 2025 13:07:53 -0500	[thread overview]
Message-ID: <22b9509b-3d92-4ecd-91b4-7a08c1da956c@sabinyo.mountain> (raw)

Hello Joshua Hahn,

Commit 72cf498b9e8a ("mm/mempolicy: skip extra call to
__alloc_pages_bulk in weighted interleave") from Jun 26, 2025
(linux-next), leads to the following Smatch static checker warning:

	mm/mempolicy.c:2641 alloc_pages_bulk_weighted_interleave()
	error: uninitialized symbol 'i'.

mm/mempolicy.c
    2559 static unsigned long alloc_pages_bulk_weighted_interleave(gfp_t gfp,
    2560                 struct mempolicy *pol, unsigned long nr_pages,
    2561                 struct page **page_array)
    2562 {
    2563         struct weighted_interleave_state *state;
    2564         struct task_struct *me = current;
    2565         unsigned int cpuset_mems_cookie;
    2566         unsigned long total_allocated = 0;
    2567         unsigned long nr_allocated = 0;
    2568         unsigned long rounds;
    2569         unsigned long node_pages, delta;
    2570         u8 *weights, weight;
    2571         unsigned int weight_total = 0;
    2572         unsigned long rem_pages = nr_pages, carryover = 0;
    2573         nodemask_t nodes;
    2574         int nnodes, node;
    2575         int resume_node = MAX_NUMNODES - 1;
    2576         u8 resume_weight = 0;
    2577         int prev_node;
    2578         int i;
    2579 
    2580         if (!nr_pages)
    2581                 return 0;
    2582 
    2583         /* read the nodes onto the stack, retry if done during rebind */
    2584         do {
    2585                 cpuset_mems_cookie = read_mems_allowed_begin();
    2586                 nnodes = read_once_policy_nodemask(pol, &nodes);
    2587         } while (read_mems_allowed_retry(cpuset_mems_cookie));
    2588 
    2589         /* if the nodemask has become invalid, we cannot do anything */
    2590         if (!nnodes)
    2591                 return 0;
    2592 
    2593         /* Continue allocating from most recent node and adjust the nr_pages */
    2594         node = me->il_prev;
    2595         weight = me->il_weight;
    2596         if (weight && node_isset(node, nodes)) {
    2597                 if (rem_pages <= weight) {
    2598                         node_pages = rem_pages;
    2599                         me->il_weight -= node_pages;
    2600                         goto allocate;

'i' is not initialized on this path: the "goto allocate" jumps into the
middle of the for loop below, bypassing the loop's "i = 0" initializer.

    2601                 }
    2602                 carryover = weight;
    2603         }
    2604         /* clear active weight in case of an allocation failure */
    2605         me->il_weight = 0;
    2606         prev_node = node;
    2607 
    2608         /* create a local copy of node weights to operate on outside rcu */
    2609         weights = kzalloc(nr_node_ids, GFP_KERNEL);
    2610         if (!weights)
    2611                 return 0;
    2612 
    2613         rcu_read_lock();
    2614         state = rcu_dereference(wi_state);
    2615         if (state) {
    2616                 memcpy(weights, state->iw_table, nr_node_ids * sizeof(u8));
    2617                 rcu_read_unlock();
    2618         } else {
    2619                 rcu_read_unlock();
    2620                 for (i = 0; i < nr_node_ids; i++)
    2621                         weights[i] = 1;
    2622         }
    2623 
    2624         /* calculate total, detect system default usage */
    2625         for_each_node_mask(node, nodes)
    2626                 weight_total += weights[node];
    2627 
    2628         /*
    2629          * Calculate rounds/partial rounds to minimize __alloc_pages_bulk calls.
    2630          * Track which node weighted interleave should resume from.
    2631          * Account for carryover. It is always allocated from the first node.
    2632          *
    2633          * if (rounds > 0) and (delta == 0), resume_node will always be
    2634          * the node following prev_node and its weight.
    2635          */
    2636         rounds = (rem_pages - carryover) / weight_total;
    2637         delta = (rem_pages - carryover) % weight_total;
    2638         resume_node = next_node_in(prev_node, nodes);
    2639         resume_weight = weights[resume_node];
    2640         node = carryover ? prev_node : next_node_in(prev_node, nodes);
--> 2641         for (i = 0; i < nnodes; i++) {
                             ^^^^^^^^^^^^^^^
Uninitialized variable.  After the goto lands at the allocate label, the
loop's "i++" and "i < nnodes" test read 'i' before it has ever been
assigned.  In production, people should use the kernel config option that
zero-initializes stack variables, but I always encourage developers to
build with CONFIG_INIT_STACK_ALL_PATTERN=y in their testing.

    2642                 weight = weights[node];
    2643                 /* when delta is depleted, resume from that node */
    2644                 if (delta && delta < weight) {
    2645                         resume_node = node;
    2646                         resume_weight = weight - delta;
    2647                 }
    2648                 /* Add the node's portion of the delta, if there is one */
    2649                 node_pages = weight * rounds + min(delta, weight) + carryover;
    2650                 delta -= min(delta, weight);
    2651                 carryover = 0;
    2652 
    2653                 /* node_pages can be 0 if an allocation fails and rounds == 0 */
    2654                 if (!node_pages)
    2655                         break;
    2656 allocate:
    2657                 nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
    2658                                                   page_array);
    2659                 page_array += nr_allocated;
    2660                 total_allocated += nr_allocated;
    2661                 if (total_allocated == nr_pages)
    2662                         break;
    2663                 prev_node = node;
    2664                 node = next_node_in(prev_node, nodes);
    2665         }
    2666 
    2667         if (weights) {
    2668                 me->il_prev = resume_node;
    2669                 me->il_weight = resume_weight;
    2670                 kfree(weights);
    2671         }
    2672         return total_allocated;
    2673 }

regards,
dan carpenter


             reply	other threads:[~2025-07-01 18:08 UTC|newest]

Thread overview: 2+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-07-01 18:07 Dan Carpenter [this message]
2025-07-01 18:52 ` Joshua Hahn

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=22b9509b-3d92-4ecd-91b4-7a08c1da956c@sabinyo.mountain \
    --to=dan.carpenter@linaro.org \
    --cc=joshua.hahnjy@gmail.com \
    --cc=linux-mm@kvack.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox