tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   fd8dab197cca2746e1fcd399a218eec5164726d4
commit: 280bbecded35eea30bbb5537f4f0a8a7e0e1b784 [3853/4127] zsmalloc: turn zspage order into runtime variable
config: parisc-randconfig-r023-20221029
compiler: hppa64-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/commit/?id=280bbecded35eea30bbb5537f4f0a8a7e0e1b784
        git remote add linux-next https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
        git fetch --no-tags linux-next master
        git checkout 280bbecded35eea30bbb5537f4f0a8a7e0e1b784
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=parisc64 SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable:
| Reported-by: kernel test robot

All warnings (new ones prefixed by >>):

   mm/zsmalloc.c: In function 'zs_create_pool':
>> mm/zsmalloc.c:2220:69: warning: right shift count >= width of type [-Wshift-count-overflow]
    2220 |         pool->min_alloc_size = (max_pages_per_zspage << PAGE_SHIFT) >>
         |                                                                     ^~
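Note: the shifted expression at line 2220 is a u32, so its width is 32 bits even on this 64-bit target; the warning fires when OBJ_INDEX_BITS (defined in mm/zsmalloc.c as BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS) evaluates to 32 or more for the configuration at hand. Below is a minimal user-space sketch of the same pattern; the PAGE_SHIFT and OBJ_INDEX_BITS values are illustrative assumptions for such a config, and the widened cast shown is only one possible way to avoid the overflow, not necessarily the fix applied upstream:

        /* shift-demo.c - build with: gcc -Wall -O2 shift-demo.c */
        #include <stdio.h>

        #define PAGE_SHIFT      12      /* assumed: 4K pages */
        #define OBJ_INDEX_BITS  36      /* assumed: a value >= 32, as on this config */

        int main(void)
        {
                unsigned int max_pages_per_zspage = 1U << 2;    /* u32, as in zs_create_pool() */

                /*
                 * The left operand is still 32 bits wide after the first
                 * shift, so a right-shift count of 36 exceeds the type
                 * width (undefined behavior); GCC diagnoses it with
                 * -Wshift-count-overflow, exactly as in the report.
                 */
                unsigned int bad = (max_pages_per_zspage << PAGE_SHIFT) >> OBJ_INDEX_BITS;

                /*
                 * One possible fix (an assumption, not the upstream patch):
                 * widen the arithmetic to unsigned long so the shift count
                 * stays below the type width on 64-bit targets.
                 */
                unsigned long ok = ((unsigned long)max_pages_per_zspage << PAGE_SHIFT)
                                        >> OBJ_INDEX_BITS;

                printf("bad=%u ok=%lu\n", bad, ok);
                return 0;
        }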
vim +2220 mm/zsmalloc.c

  2196	
  2197	/**
  2198	 * zs_create_pool - Creates an allocation pool to work from.
  2199	 * @name: pool name to be created
  2200	 *
  2201	 * This function must be called before anything when using
  2202	 * the zsmalloc allocator.
  2203	 *
  2204	 * On success, a pointer to the newly created pool is returned,
  2205	 * otherwise NULL.
  2206	 */
  2207	struct zs_pool *zs_create_pool(const char *name)
  2208	{
  2209		int i;
  2210		struct zs_pool *pool;
  2211		struct size_class *prev_class = NULL;
  2212		u32 max_pages_per_zspage;
  2213	
  2214		pool = kzalloc(sizeof(*pool), GFP_KERNEL);
  2215		if (!pool)
  2216			return NULL;
  2217	
  2218		max_pages_per_zspage = 1U << ZS_DEFAULT_PAGE_ORDER;
  2219		/* min_alloc_size must be multiple of ZS_ALIGN */
> 2220		pool->min_alloc_size = (max_pages_per_zspage << PAGE_SHIFT) >>
  2221					OBJ_INDEX_BITS;
  2222		pool->min_alloc_size = max(pool->min_alloc_size, ZS_MIN_ALLOC_SIZE);
  2223	
  2224		pool->num_size_classes =
  2225			DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - pool->min_alloc_size,
  2226				     ZS_SIZE_CLASS_DELTA) + 1;
  2227	
  2228		pool->size_class = kmalloc_array(pool->num_size_classes,
  2229						 sizeof(struct size_class *),
  2230						 GFP_KERNEL | __GFP_ZERO);
  2231		if (!pool->size_class)
  2232			goto err;
  2233	
  2234		init_deferred_free(pool);
  2235		rwlock_init(&pool->migrate_lock);
  2236	
  2237		pool->name = kstrdup(name, GFP_KERNEL);
  2238		if (!pool->name)
  2239			goto err;
  2240	
  2241		if (create_cache(pool))
  2242			goto err;
  2243	
  2244		/*
  2245		 * Iterate reversely, because, size of size_class that we want to use
  2246		 * for merging should be larger or equal to current size.
  2247		 */
  2248		for (i = pool->num_size_classes - 1; i >= 0; i--) {
  2249			int size;
  2250			int pages_per_zspage;
  2251			int objs_per_zspage;
  2252			struct size_class *class;
  2253			int fullness = 0;
  2254	
  2255			size = pool->min_alloc_size + i * ZS_SIZE_CLASS_DELTA;
  2256			if (size > ZS_MAX_ALLOC_SIZE)
  2257				size = ZS_MAX_ALLOC_SIZE;
  2258			pages_per_zspage = get_pages_per_zspage(size,
  2259							max_pages_per_zspage);
  2260			objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
  2261	
  2262			/*
  2263			 * We iterate from biggest down to smallest classes,
  2264			 * so huge_class_size holds the size of the first huge
  2265			 * class. Any object bigger than or equal to that will
  2266			 * endup in the huge class.
  2267			 */
  2268			if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
  2269					!huge_class_size) {
  2270				huge_class_size = size;
  2271				/*
  2272				 * The object uses ZS_HANDLE_SIZE bytes to store the
  2273				 * handle. We need to subtract it, because zs_malloc()
  2274				 * unconditionally adds handle size before it performs
  2275				 * size class search - so object may be smaller than
  2276				 * huge class size, yet it still can end up in the huge
  2277				 * class because it grows by ZS_HANDLE_SIZE extra bytes
  2278				 * right before class lookup.
  2279				 */
  2280				huge_class_size -= (ZS_HANDLE_SIZE - 1);
  2281			}
  2282	
  2283			/*
  2284			 * size_class is used for normal zsmalloc operation such
  2285			 * as alloc/free for that size. Although it is natural that we
  2286			 * have one size_class for each size, there is a chance that we
  2287			 * can get more memory utilization if we use one size_class for
  2288			 * many different sizes whose size_class have same
  2289			 * characteristics. So, we makes size_class point to
  2290			 * previous size_class if possible.
  2291			 */
  2292			if (prev_class) {
  2293				if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
  2294					pool->size_class[i] = prev_class;
  2295					continue;
  2296				}
  2297			}
  2298	
  2299			class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
  2300			if (!class)
  2301				goto err;
  2302	
  2303			class->size = size;
  2304			class->index = i;
  2305			class->pages_per_zspage = pages_per_zspage;
  2306			class->objs_per_zspage = objs_per_zspage;
  2307			spin_lock_init(&class->lock);
  2308			pool->size_class[i] = class;
  2309			for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
  2310							fullness++)
  2311				INIT_LIST_HEAD(&class->fullness_list[fullness]);
  2312	
  2313			prev_class = class;
  2314		}
  2315	
  2316		/* debug only, don't abort if it fails */
  2317		zs_pool_stat_create(pool, name);
  2318	
  2319		/*
  2320		 * Not critical since shrinker is only used to trigger internal
  2321		 * defragmentation of the pool which is pretty optional thing. If
  2322		 * registration fails we still can use the pool normally and user can
  2323		 * trigger compaction manually. Thus, ignore return code.
  2324		 */
  2325		zs_register_shrinker(pool);
  2326	
  2327		return pool;
  2328	
  2329	err:
  2330		zs_destroy_pool(pool);
  2331		return NULL;
  2332	}
  2333	EXPORT_SYMBOL_GPL(zs_create_pool);
  2334	

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp