From: kernel test robot <lkp@intel.com>
To: Ananda <a.badmaev@clicknet.pro>, linux-mm@kvack.org
Cc: kbuild-all@lists.01.org, Ananda Badmaev <a.badmaev@clicknet.pro>
Subject: Re: [PATCH/RESEND] mm: add ztree - new allocator for use via zpool API
Date: Tue, 1 Mar 2022 01:35:17 +0800
Message-ID: <202202282010.qo0U7ATX-lkp@intel.com>
In-Reply-To: <20220228110546.151513-1-a.badmaev@clicknet.pro>
Hi Ananda,
I love your patch! Perhaps something to improve:
[auto build test WARNING on hnaz-mm/master]
url: https://github.com/0day-ci/linux/commits/Ananda/mm-add-ztree-new-allocator-for-use-via-zpool-API/20220228-190619
base: https://github.com/hnaz/linux-mm master
config: sh-allmodconfig (https://download.01.org/0day-ci/archive/20220228/202202282010.qo0U7ATX-lkp@intel.com/config)
compiler: sh4-linux-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/0day-ci/linux/commit/a30d4197bd92974faeafba46d6e74d3d8943c88c
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Ananda/mm-add-ztree-new-allocator-for-use-via-zpool-API/20220228-190619
git checkout a30d4197bd92974faeafba46d6e74d3d8943c88c
# save the config file to linux build tree
mkdir build_dir
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross O=build_dir ARCH=sh SHELL=/bin/bash M=mm/
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
>> mm/ztree.c:236:6: warning: no previous prototype for 'free_block_tree' [-Wmissing-prototypes]
236 | void free_block_tree(struct ztree_pool *pool, int block_type)
| ^~~~~~~~~~~~~~~
>> mm/ztree.c:282:20: warning: no previous prototype for 'ztree_create_pool' [-Wmissing-prototypes]
282 | struct ztree_pool *ztree_create_pool(gfp_t gfp, const struct ztree_ops *ops)
| ^~~~~~~~~~~~~~~~~
>> mm/ztree.c:327:6: warning: no previous prototype for 'ztree_destroy_pool' [-Wmissing-prototypes]
327 | void ztree_destroy_pool(struct ztree_pool *pool)
| ^~~~~~~~~~~~~~~~~~
>> mm/ztree.c:350:5: warning: no previous prototype for 'ztree_alloc' [-Wmissing-prototypes]
350 | int ztree_alloc(struct ztree_pool *pool, size_t size, gfp_t gfp,
| ^~~~~~~~~~~
>> mm/ztree.c:414:6: warning: no previous prototype for 'ztree_free' [-Wmissing-prototypes]
414 | void ztree_free(struct ztree_pool *pool, unsigned long handle)
| ^~~~~~~~~~
>> mm/ztree.c:474:5: warning: no previous prototype for 'ztree_reclaim_block' [-Wmissing-prototypes]
474 | int ztree_reclaim_block(struct ztree_pool *pool)
| ^~~~~~~~~~~~~~~~~~~
>> mm/ztree.c:554:7: warning: no previous prototype for 'ztree_map' [-Wmissing-prototypes]
554 | void *ztree_map(struct ztree_pool *pool, unsigned long handle)
| ^~~~~~~~~
>> mm/ztree.c:591:6: warning: no previous prototype for 'ztree_unmap' [-Wmissing-prototypes]
591 | void ztree_unmap(struct ztree_pool *pool, unsigned long handle)
| ^~~~~~~~~~~
>> mm/ztree.c:628:5: warning: no previous prototype for 'ztree_get_pool_size' [-Wmissing-prototypes]
628 | u64 ztree_get_pool_size(struct ztree_pool *pool)
| ^~~~~~~~~~~~~~~~~~~
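
All of the above are -Wmissing-prototypes warnings: the functions are defined with external linkage in mm/ztree.c, but no prior declaration is visible at the point of definition. The usual fix is to mark file-local helpers static and to declare the exported API in a header that mm/ztree.c includes. A minimal sketch follows; the header name include/linux/ztree.h and the split between static helpers and exported functions are illustrative assumptions, not taken from the patch:

	/* mm/ztree.c: helpers used only inside this file get internal linkage,
	 * which both silences the warning and documents the intent.
	 */
	static void free_block_tree(struct ztree_pool *pool, int block_type);

	/* include/linux/ztree.h (hypothetical): declarations for the API that
	 * callers such as the zpool glue would use. Prototypes copied from the
	 * definitions quoted in the warnings above.
	 */
	#include <linux/types.h>
	#include <linux/gfp.h>

	struct ztree_pool;
	struct ztree_ops;

	struct ztree_pool *ztree_create_pool(gfp_t gfp, const struct ztree_ops *ops);
	void ztree_destroy_pool(struct ztree_pool *pool);
	int ztree_alloc(struct ztree_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle);
	void ztree_free(struct ztree_pool *pool, unsigned long handle);
	int ztree_reclaim_block(struct ztree_pool *pool);
	void *ztree_map(struct ztree_pool *pool, unsigned long handle);
	void ztree_unmap(struct ztree_pool *pool, unsigned long handle);
	u64 ztree_get_pool_size(struct ztree_pool *pool);

Either approach satisfies -Wmissing-prototypes; making purely internal functions like free_block_tree() static is generally preferred, since the compiler can then also warn if they become unused.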
Kconfig warnings: (for reference only)
WARNING: unmet direct dependencies detected for SND_SOC_LPASS_RX_MACRO
Depends on SOUND && !UML && SND && SND_SOC && COMMON_CLK
Selected by
- SND_SOC_SC7280 && SOUND && !UML && SND && SND_SOC && SND_SOC_QCOM && I2C && SOUNDWIRE
WARNING: unmet direct dependencies detected for SND_SOC_LPASS_TX_MACRO
Depends on SOUND && !UML && SND && SND_SOC && COMMON_CLK
Selected by
- SND_SOC_SC7280 && SOUND && !UML && SND && SND_SOC && SND_SOC_QCOM && I2C && SOUNDWIRE
vim +/free_block_tree +236 mm/ztree.c
231
232
233 /*
234 * free block tree with blocks of particular type
235 */
> 236 void free_block_tree(struct ztree_pool *pool, int block_type)
237 {
238 struct ztree_block *block, *tmp;
239 struct block_tree *tree;
240
241 tree = &(pool->block_trees)[block_type];
242 spin_lock(&tree->lock);
243 rbtree_postorder_for_each_entry_safe(block, tmp, &tree->root, block_node) {
244 free_pages((unsigned long)block->compressed_data,
245 tree_desc[block_type].order);
246 kmem_cache_free(pool->block_cache, block);
247 }
248 spin_unlock(&tree->lock);
249 }
250
251 /*
252 * Encodes the handle of a particular slot in the pool using metadata
253 */
254 static inline unsigned long metadata_to_handle(unsigned long block_type,
255 unsigned long block_index, unsigned long slot)
256 {
257 return (block_type << BLOCK_TYPE_SHIFT) + (block_index << SLOT_BITS) + slot;
258 }
259
260
261 /* Returns block type, block index and slot in the pool corresponding to handle */
262 static inline void handle_to_metadata(unsigned long handle, unsigned long *block_type,
263 unsigned long *block_index, unsigned long *slot)
264 {
265 *block_type = handle >> BLOCK_TYPE_SHIFT;
266 *block_index = (handle & BLOCK_INDEX_MASK) >> SLOT_BITS;
267 *slot = handle & SLOT_MASK;
268 }
269
270
271 /*****************
272 * API Functions
273 *****************/
274 /**
275 * ztree_create_pool() - create a new ztree pool array
276 * @gfp: gfp flags when allocating the ztree pool structure
277 * @ops: user-defined operations for the ztree pool
278 *
279 * Return: pointer to the new ztree pool or NULL if the metadata allocation
280 * failed.
281 */
> 282 struct ztree_pool *ztree_create_pool(gfp_t gfp, const struct ztree_ops *ops)
283 {
284 struct ztree_pool *pool;
285 struct block_tree *tree;
286 int i, block_types_nr;
287
288 pool = kmalloc(sizeof(struct ztree_pool), gfp);
289 if (!pool)
290 return NULL;
291
292 block_types_nr = ARRAY_SIZE(tree_desc);
293
294 pool->block_cache = kmem_cache_create("ztree_blocks",
295 sizeof(struct ztree_block), 0, 0, NULL);
296 if (!pool->block_cache) {
297 kfree(pool);
298 return NULL;
299 }
300
301 pool->block_trees = kmalloc(block_types_nr * sizeof(struct block_tree), gfp);
302 if (!pool->block_trees) {
303 kmem_cache_destroy(pool->block_cache);
304 kfree(pool);
305 return NULL;
306 }
307
308 /* init each basic block tree */
309 for (i = 0; i < block_types_nr; i++) {
310 tree = &(pool->block_trees)[i];
311 spin_lock_init(&tree->lock);
312 tree->root = RB_ROOT;
313 tree->last_block = NULL;
314 tree->current_block = NULL;
315 tree->counter = 0;
316 tree->block_count = 0;
317 }
318 pool->ops = ops;
319 return pool;
320 }
321
322 /**
323 * ztree_destroy_pool() - destroys an existing ztree pool
324 * @pool: the ztree pool to be destroyed
325 *
326 */
> 327 void ztree_destroy_pool(struct ztree_pool *pool)
328 {
329 int i;
330
331 for (i = 0; i < ARRAY_SIZE(tree_desc); i++)
332 free_block_tree(pool, i);
333 kmem_cache_destroy(pool->block_cache);
334 kfree(pool->block_trees);
335 kfree(pool);
336 }
337
338
339 /**
340 * ztree_alloc() - allocates a slot of appropriate size
341 * @pool: ztree pool from which to allocate
342 * @size: size in bytes of the desired allocation
343 * @gfp: gfp flags used if the pool needs to grow
344 * @handle: handle of the new allocation
345 *
346 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
347 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
348 * a new slot.
349 */
> 350 int ztree_alloc(struct ztree_pool *pool, size_t size, gfp_t gfp,
351 unsigned long *handle)
352 {
353 unsigned long block_type, slot;
354 struct ztree_block *block;
355 struct block_tree *tree;
356
357 if (!size)
358 return -EINVAL;
359
360 if (size > PAGE_SIZE)
361 return -ENOSPC;
362
363 /* find basic block type with suitable slot size */
364 for (block_type = 0; block_type < ARRAY_SIZE(tree_desc); block_type++) {
365 if (size <= tree_desc[block_type].slot_size)
366 break;
367 }
368 tree = &(pool->block_trees[block_type]);
369 spin_lock(&tree->lock);
370 /* check if there are free slots in the current and the last added blocks */
371 if (tree->current_block && tree->current_block->free_slots > 0) {
372 block = tree->current_block;
373 goto found;
374 }
375 if (tree->last_block && tree->last_block->free_slots > 0) {
376 block = tree->last_block;
377 goto found;
378 }
379 spin_unlock(&tree->lock);
380
381 /* not found block with free slots try to allocate new empty block */
382 block = alloc_block(pool, block_type, gfp);
383 spin_lock(&tree->lock);
384 if (block) {
385 tree->current_block = block;
386 goto found;
387 }
388 spin_unlock(&tree->lock);
389 return -ENOMEM;
390
391 found:
392 spin_lock(&block->lock);
393 block->free_slots--;
394 spin_unlock(&tree->lock);
395 /* find the first free slot in block */
396 for (slot = 0; slot < tree_desc[block_type].slots_per_block; slot++) {
397 if (block->slot_info[slot] == SLOT_FREE)
398 break;
399 }
400 block->slot_info[slot] = SLOT_OCCUPIED;
401 block->coeff = block->free_slots *
402 (tree_desc[block_type].slots_per_block - block->free_slots);
403 spin_unlock(&block->lock);
404 *handle = metadata_to_handle(block_type, block->block_index, slot);
405 return 0;
406 }
407
408 /**
409 * ztree_free() - frees the allocation associated with the given handle
410 * @pool: pool in which the allocation resided
411 * @handle: handle associated with the allocation returned by ztree_alloc()
412 *
413 */
> 414 void ztree_free(struct ztree_pool *pool, unsigned long handle)
415 {
416 unsigned long slot, block_type, block_index;
417 struct rb_node *tmp;
418 struct ztree_block *block;
419 struct block_tree *tree;
420
421 handle_to_metadata(handle, &block_type, &block_index, &slot);
422 tree = &(pool->block_trees[block_type]);
423
424 /* find block corresponding to handle */
425 spin_lock(&tree->lock);
426 tmp = rb_find(&block_index, &tree->root, index_comp);
427 if (!tmp) {
428 spin_unlock(&tree->lock);
429 pr_err("ztree block not found\n");
430 return;
431 }
432 block = rb_entry(tmp, struct ztree_block, block_node);
433 if (block->under_reclaim) {
434 spin_unlock(&tree->lock);
435 return;
436 }
437 block->free_slots++;
438 /* if all slots in block are empty delete whole block */
439 if (block->free_slots == tree_desc[block_type].slots_per_block) {
440 rb_erase(&block->block_node, &tree->root);
441 tree->block_count--;
442
443 /* if the last block to be deleted */
444 if (block == tree->last_block) {
445 tree->current_block = NULL;
446 tree->last_block = NULL;
447 /* otherwise set current block to last block */
448 } else {
449 tree->current_block = tree->last_block;
450 }
451 spin_unlock(&tree->lock);
452 free_pages((unsigned long)block->compressed_data, tree_desc[block_type].order);
453 kmem_cache_free(pool->block_cache, block);
454 return;
455 }
456 /* switch current block */
457 if (!tree->current_block || block->coeff >= tree->current_block->coeff)
458 tree->current_block = block;
459 spin_lock(&block->lock);
460 spin_unlock(&tree->lock);
461 block->slot_info[slot] = SLOT_FREE;
462 block->coeff = block->free_slots *
463 (tree_desc[block_type].slots_per_block - block->free_slots);
464 spin_unlock(&block->lock);
465 }
466
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org