>
> On Tue, 11 May 2021 at 17:07, <glittao@gmail.com> wrote:
> > From: Oliver Glitta <glittao@gmail.com>
> >
> > SLUB has a resiliency_test() function, which is hidden behind an
> > #ifdef SLUB_RESILIENCY_TEST that is not part of Kconfig, so nobody
> > runs it. KUnit should be a proper replacement for it.
> >
> > Try changing a byte in the redzone after allocation, and changing
> > the pointer to the next free node, the first byte, the 50th byte
> > and a redzone byte. Check whether validation finds the errors.
> >
> > There are several differences from the original resiliency test:
> > the tests create their own caches with a known state instead of
> > corrupting the shared kmalloc caches.
> >
> > The freepointer corruption now uses the correct offset; the
> > original resiliency test was broken by the freepointer changes.
> >
> > Drop the test that changed a random byte, because it has no
> > meaning in this form, where we need deterministic results.
> >
> > Add a new option, CONFIG_SLUB_KUNIT_TEST, to Kconfig. The tests
> > next_pointer, first_word and clobber_50th_byte do not run with the
> > KASAN option on, because they deliberately modify non-allocated
> > objects.
> >
> > Use a kunit_resource to count the errors in the cache and to
> > silence the bug reports. Count an error whenever slab_bug() or
> > slab_fix() is called, or when the count of pages is wrong.
> >
> > Signed-off-by: Oliver Glitta <glittao@gmail.com>
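
On the kunit_resource counting above: the test module's side of the
counter looks roughly like the sketch below (trimmed, error handling
omitted). The name "slab_errors" has to match the one that
slab_add_kunit_errors() looks up:

#include <kunit/test.h>

static struct kunit_resource resource;
static int slab_errors;

static int test_init(struct kunit *test)
{
	slab_errors = 0;
	/* Publish the counter under the name the SLUB side looks up. */
	kunit_add_named_resource(test, NULL, NULL, &resource,
				 "slab_errors", &slab_errors);
	return 0;
}

Since this runs as the suite's .init, every case starts from a zero
count before it corrupts anything.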
>
> I think I had already reviewed v4, and the changes here are fine:
>
> Reviewed-by: Marco Elver <elver@google.com>
Thank you again.
Yes, this works, thank you. I tried to remove the function, but I
didn't think about not setting it, so it didn't work. I will fix it.
> > +static struct kunit_case test_cases[] = {
> > + KUNIT_CASE(test_clobber_zone),
> > +
> > +#ifndef CONFIG_KASAN
> > + KUNIT_CASE(test_next_pointer),
> > + KUNIT_CASE(test_first_word),
> > + KUNIT_CASE(test_clobber_50th_byte),
> > +#endif
> > +
> > + KUNIT_CASE(test_clobber_redzone_free),
> > + {}
>
> This is better, and tells us exactly which tests were causing
> problems with KASAN.
>
>
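Right, and for the record, the cases that do stay enabled under KASAN
just suspend it around the intentional corruption. Roughly, as a
trimmed sketch (the redzone offset assumes the 64-byte objects the
test caches use):

static void test_clobber_zone(struct kunit *test)
{
	struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
				SLAB_RED_ZONE, NULL);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kasan_disable_current();
	p[64] = 0x12;		/* clobber the first redzone byte */

	validate_slab_cache(s);
	/* one error from the report, one from the slab_fix() restore */
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_free(s, p);
	kmem_cache_destroy(s);
}
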
> > +};
> > +
> > +static struct kunit_suite test_suite = {
> > + .name = "slub_test",
> > + .init = test_init,
> > + .exit = test_exit,
> > + .test_cases = test_cases,
> > +};
> > +kunit_test_suite(test_suite);
> > +
> > +MODULE_LICENSE("GPL");
> > diff --git a/mm/slab.h b/mm/slab.h
> > index 18c1927cd196..9b690fa44cae 100644
> > --- a/mm/slab.h
> > +++ b/mm/slab.h
> > @@ -215,6 +215,7 @@ DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
> > DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
> > #endif
> > extern void print_tracking(struct kmem_cache *s, void *object);
> > +long validate_slab_cache(struct kmem_cache *s);
> > #else
> > static inline void print_tracking(struct kmem_cache *s, void *object)
> > {
> > diff --git a/mm/slub.c b/mm/slub.c
> > index feda53ae62ba..985fd6ef033c 100644
> > --- a/mm/slub.c
> > +++ b/mm/slub.c
> > @@ -35,6 +35,7 @@
> > #include <linux/prefetch.h>
> > #include <linux/memcontrol.h>
> > #include <linux/random.h>
> > +#include <kunit/test.h>
> >
> > #include <trace/events/kmem.h>
> >
> > @@ -447,6 +448,26 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
> > static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
> > static DEFINE_SPINLOCK(object_map_lock);
> >
> > +#if IS_ENABLED(CONFIG_KUNIT)
> > +static bool slab_add_kunit_errors(void)
> > +{
> > + struct kunit_resource *resource;
> > +
> > + if (likely(!current->kunit_test))
> > + return false;
> > +
> > + resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
> > + if (!resource)
> > + return false;
> > +
> > + (*(int *)resource->data)++;
> > + kunit_put_resource(resource);
> > + return true;
> > +}
> > +#else
> > +static inline bool slab_add_kunit_errors(void) { return false; }
> > +#endif
> > +
> > /*
> > * Determine a map of object in use on a page.
> > *
> > @@ -677,6 +698,9 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
> > struct va_format vaf;
> > va_list args;
> >
> > + if (slab_add_kunit_errors())
> > + return;
> > +
> > va_start(args, fmt);
> > vaf.fmt = fmt;
> >
> > vaf.va = &args;
> > @@ -740,6 +764,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
> > void object_err(struct kmem_cache *s, struct page *page,
> > u8 *object, char *reason)
> > {
> > + if (slab_add_kunit_errors())
> > + return;
> > +
> > slab_bug(s, "%s", reason);
> > print_trailer(s, page, object);
> > }
> > @@ -750,6 +777,9 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
> > va_list args;
> > char buf[100];
> >
> > + if (slab_add_kunit_errors())
> > + return;
> > +
> > va_start(args, fmt);
> > vsnprintf(buf, sizeof(buf), fmt, args);
> > va_end(args);
> > @@ -799,12 +829,16 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
> > while (end > fault && end[-1] == value)
> > end--;
> >
> > + if (slab_add_kunit_errors())
> > + goto skip_bug_print;
> > +
> > slab_bug(s, "%s overwritten", what);
> > pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
> > fault, end - 1, fault - addr,
> > fault[0], value);
> > print_trailer(s, page, object);
> >
> > +skip_bug_print:
> > restore_bytes(s, what, value, fault, end);
> > return 0;
> > }
> > @@ -4662,9 +4696,11 @@ static int validate_slab_node(struct kmem_cache *s,
> > validate_slab(s, page);
> > count++;
> > }
> > - if (count != n->nr_partial)
> > + if (count != n->nr_partial) {
> > pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
> > s->name, count, n->nr_partial);
> > + slab_add_kunit_errors();
> > + }
> >
> > if (!(s->flags & SLAB_STORE_USER))
> > goto out;
> > @@ -4673,16 +4709,18 @@ static int validate_slab_node(struct kmem_cache *s,
> > validate_slab(s, page);
> > count++;
> > }
> > - if (count != atomic_long_read(&n->nr_slabs))
> > + if (count != atomic_long_read(&n->nr_slabs)) {
> > pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
> > s->name, count, atomic_long_read(&n->nr_slabs));
> > + slab_add_kunit_errors();
> > + }
> >
> > out:
> > spin_unlock_irqrestore(&n->list_lock, flags);
> > return count;
> > }
> >
> > -static long validate_slab_cache(struct kmem_cache *s)
> > +long validate_slab_cache(struct kmem_cache *s)
> > {
> > int node;
> > unsigned long count = 0;
> > @@ -4694,6 +4732,8 @@ static long validate_slab_cache(struct kmem_cache *s)
> >
> > return count;
> > }
> > +EXPORT_SYMBOL(validate_slab_cache);
> > +
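
(The EXPORT_SYMBOL is what lets the suite be built as a module and
still drive a validation pass directly. The free-path check follows
the same pattern; again a trimmed sketch rather than the exact test:)

static void test_clobber_redzone_free(struct kunit *test)
{
	struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
				SLAB_RED_ZONE, NULL);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kasan_disable_current();
	kmem_cache_free(s, p);
	p[64] = 0xab;		/* corrupt the redzone after the free */

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_destroy(s);
}
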
> > /*
> > * Generate lists of code addresses where slabcache objects are allocated
> > * and freed.
> > --
> > 2.31.1.272.g89b43f80a5
> >