/*
 * kmem_cache_sizes_init - bootstrap the slab allocator's own caches.
 *
 * Creates the cache that holds kmem_slab_t management structures, then
 * walks the cache_sizes[] table (terminated by a zero cs_size entry)
 * creating one general-purpose cache per size class.  Any creation
 * failure this early is unrecoverable, so we panic.
 */
void __init
kmem_cache_sizes_init(void)
{
	cache_sizes_t *csizep = cache_sizes;
	char **namep = cache_sizes_name;
	unsigned int off_slab_seen = 0;

	cache_slabp = kmem_cache_create("slab_cache", sizeof(kmem_slab_t),
					0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!cache_slabp)
		goto panic_time;

	do {
		/*
		 * For performance, all the general caches are L1 aligned.
		 * This should be particularly beneficial on SMP boxes, as it
		 * eliminates "false sharing".
		 * Note for systems short on memory removing the alignment will
		 * allow tighter packing of the smaller caches.
		 */
		csizep->cs_cachep = kmem_cache_create(*namep++,
						      csizep->cs_size, 0,
						      SLAB_HWCACHE_ALIGN,
						      NULL, NULL);
		if (!csizep->cs_cachep)
			goto panic_time;

		/*
		 * Raise the off-slab bufctl limit from each on-slab cache
		 * until the first cache using off-slab bufctls is hit.
		 */
		if (!off_slab_seen) {
			if (SLAB_BUFCTL(csizep->cs_cachep->c_flags))
				off_slab_seen++;
			else
				bufctl_limit =
					csizep->cs_size / sizeof(kmem_bufctl_t);
		}

		csizep->cs_cachep->c_flags |= SLAB_CFLGS_GENERAL;
		csizep++;
	} while (csizep->cs_size);

#if SLAB_SELFTEST
	kmem_self_test();
#endif	/* SLAB_SELFTEST */
	return;

panic_time:
	panic("kmem_cache_sizes_init: Error creating caches");
	/* NOTREACHED */
}