// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for SL[AOU]B/page initialization at alloc/free time.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

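/*
 * Poison values written into allocations before they are freed. Any of these
 * bytes showing up in a later allocation means the allocator failed to
 * initialize the memory.
 */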
#define GARBAGE_INT (0x09A7BA9E)
#define GARBAGE_BYTE (0x9E)

#define REPORT_FAILURES_IN_FN() \
	do { \
		if (failures) \
			pr_info("%s failed %d out of %d times\n", \
				__func__, failures, num_tests); \
		else \
			pr_info("all %d tests in %s passed\n", \
				num_tests, __func__); \
	} while (0)

/* Calculate the number of uninitialized (non-zero) bytes in the buffer. */
static int __init count_nonzero_bytes(void *ptr, size_t size)
{
	int i, ret = 0;
	unsigned char *p = (unsigned char *)ptr;

	for (i = 0; i < size; i++)
		if (p[i])
			ret++;
	return ret;
}

/* Fill a buffer with garbage, skipping the first |skip| bytes. */
static void __init fill_with_garbage_skip(void *ptr, int size, size_t skip)
{
	unsigned int *p = (unsigned int *)((char *)ptr + skip);
	int i = 0;

	WARN_ON(skip > size);
	size -= skip;

	while (size >= sizeof(*p)) {
		p[i] = GARBAGE_INT;
		i++;
		size -= sizeof(*p);
	}
	if (size)
		memset(&p[i], GARBAGE_BYTE, size);
}

static void __init fill_with_garbage(void *ptr, size_t size)
{
	fill_with_garbage_skip(ptr, size, 0);
}

static int __init do_alloc_pages_order(int order, int *total_failures)
{
	struct page *page;
	void *buf;
	size_t size = PAGE_SIZE << order;

	page = alloc_pages(GFP_KERNEL, order);
	if (!page)
		goto err;
	buf = page_address(page);
	fill_with_garbage(buf, size);
	__free_pages(page, order);

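	/*
	 * Allocate the same order again. If alloc-time or free-time
	 * initialization is enabled, none of the garbage written above
	 * may show up in the new allocation.
	 */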
	page = alloc_pages(GFP_KERNEL, order);
	if (!page)
		goto err;
	buf = page_address(page);
	if (count_nonzero_bytes(buf, size))
		(*total_failures)++;
	fill_with_garbage(buf, size);
	__free_pages(page, order);
	return 1;
err:
	(*total_failures)++;
	return 1;
}

/* Test the page allocator by calling alloc_pages with different orders. */
static int __init test_pages(int *total_failures)
{
	int failures = 0, num_tests = 0;
	int i;

	for (i = 0; i < NR_PAGE_ORDERS; i++)
		num_tests += do_alloc_pages_order(i, &failures);

	REPORT_FAILURES_IN_FN();
	*total_failures += failures;
	return num_tests;
}

/* Test kmalloc() with the given allocation size. */
static int __init do_kmalloc_size(size_t size, int *total_failures)
{
	void *buf;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		goto err;
	fill_with_garbage(buf, size);
	kfree(buf);

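	/* Reallocate and check that the old garbage has been wiped. */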
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		goto err;
	if (count_nonzero_bytes(buf, size))
		(*total_failures)++;
	fill_with_garbage(buf, size);
	kfree(buf);
	return 1;
err:
	(*total_failures)++;
	return 1;
}

/* Test vmalloc() with the given allocation size. */
static int __init do_vmalloc_size(size_t size, int *total_failures)
{
	void *buf;

	buf = vmalloc(size);
	if (!buf)
		goto err;
	fill_with_garbage(buf, size);
	vfree(buf);

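	/* Same check as in do_kmalloc_size(), but through vmalloc(). */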
	buf = vmalloc(size);
	if (!buf)
		goto err;
	if (count_nonzero_bytes(buf, size))
		(*total_failures)++;
	fill_with_garbage(buf, size);
	vfree(buf);
	return 1;
err:
	(*total_failures)++;
	return 1;
}

/* Test kmalloc()/vmalloc() by allocating objects of different sizes. */
static int __init test_kvmalloc(int *total_failures)
{
	int failures = 0, num_tests = 0;
	int i, size;

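	/* Cover power-of-two sizes from 1 byte up to 512 KiB. */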
	for (i = 0; i < 20; i++) {
		size = 1 << i;
		num_tests += do_kmalloc_size(size, &failures);
		num_tests += do_vmalloc_size(size, &failures);
	}

	REPORT_FAILURES_IN_FN();
	*total_failures += failures;
	return num_tests;
}

#define CTOR_BYTES (sizeof(unsigned int))
#define CTOR_PATTERN (0x41414141)
/* Initialize the first 4 bytes of the object. */
static void test_ctor(void *obj)
{
	*(unsigned int *)obj = CTOR_PATTERN;
}

/*
 * Check the invariants for the buffer allocated from a slab cache.
 * If the cache has a test constructor, the first 4 bytes of the object must
 * always remain equal to CTOR_PATTERN.
 * If the cache isn't an RCU-typesafe one, or if the allocation is done with
 * __GFP_ZERO, then the object contents must be zeroed after allocation.
 * If the cache is an RCU-typesafe one, the object contents must never be
 * zeroed after the first use. This is checked by memcmp() in
 * do_kmem_cache_size().
 */
static bool __init check_buf(void *buf, int size, bool want_ctor,
			     bool want_rcu, bool want_zero)
{
	int bytes;
	bool fail = false;

	bytes = count_nonzero_bytes(buf, size);
	WARN_ON(want_ctor && want_zero);
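	/* A __GFP_ZERO allocation must not contain any non-zero bytes. */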
	if (want_zero)
		return bytes;
	if (want_ctor) {
		if (*(unsigned int *)buf != CTOR_PATTERN)
			fail = 1;
	} else {
		if (bytes)
			fail = !want_rcu;
	}
	return fail;
}

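/* Scratch array for the bulk allocation test in do_kmem_cache_size(). */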
#define BULK_SIZE 100
static void *bulk_array[BULK_SIZE];

/*
 * Test kmem_cache with given parameters:
 *  want_ctor - use a constructor;
 *  want_rcu - use SLAB_TYPESAFE_BY_RCU;
 *  want_zero - use __GFP_ZERO.
 */
static int __init do_kmem_cache_size(size_t size, bool want_ctor,
				     bool want_rcu, bool want_zero,
				     int *total_failures)
{
	struct kmem_cache *c;
	int iter;
	bool fail = false;
	gfp_t alloc_mask = GFP_KERNEL | (want_zero ? __GFP_ZERO : 0);
	void *buf, *buf_copy;

	c = kmem_cache_create("test_cache", size, 1,
			      want_rcu ? SLAB_TYPESAFE_BY_RCU : 0,
			      want_ctor ? test_ctor : NULL);
	for (iter = 0; iter < 10; iter++) {
		/* Do a test of bulk allocations */
		if (!want_rcu && !want_ctor) {
			int ret;

			ret = kmem_cache_alloc_bulk(c, alloc_mask, BULK_SIZE, bulk_array);
			if (!ret) {
				fail = true;
			} else {
				int i;
				for (i = 0; i < ret; i++)
					fail |= check_buf(bulk_array[i], size, want_ctor, want_rcu, want_zero);
				kmem_cache_free_bulk(c, ret, bulk_array);
			}
		}

		buf = kmem_cache_alloc(c, alloc_mask);
		/* Check that buf is zeroed, if it must be. */
		fail |= check_buf(buf, size, want_ctor, want_rcu, want_zero);
		fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0);

		if (!want_rcu) {
			kmem_cache_free(c, buf);
			continue;
		}

		/*
		 * If this is an RCU cache, use a critical section to ensure we
		 * can touch objects after they're freed.
		 */
		rcu_read_lock();
		/*
		 * Copy the buffer to check that it's not wiped on
		 * free().
		 */
		buf_copy = kmalloc(size, GFP_ATOMIC);
		if (buf_copy)
			memcpy(buf_copy, buf, size);

		kmem_cache_free(c, buf);
		/*
		 * Check that |buf| is intact after kmem_cache_free().
		 * |want_zero| is false, because we wrote garbage to
		 * the buffer already.
		 */
		fail |= check_buf(buf, size, want_ctor, want_rcu,
				  false);
		if (buf_copy) {
			fail |= (bool)memcmp(buf, buf_copy, size);
			kfree(buf_copy);
		}
		rcu_read_unlock();
	}
	kmem_cache_destroy(c);

	*total_failures += fail;
	return 1;
}

/*
 * Check that the data written to an RCU-allocated object survives
 * reallocation.
 */
static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures)
{
	struct kmem_cache *c;
	void *buf, *buf_contents, *saved_ptr;
	void **used_objects;
	int i, iter, maxiter = 1024;
	bool fail = false;

	c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU,
			      NULL);
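	/*
	 * Allocate one object, poison it, save its address and contents,
	 * then free it.
	 */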
	buf = kmem_cache_alloc(c, GFP_KERNEL);
	if (!buf)
		goto out;
	saved_ptr = buf;
	fill_with_garbage(buf, size);
	buf_contents = kmalloc(size, GFP_KERNEL);
	if (!buf_contents) {
		kmem_cache_free(c, buf);
		goto out;
	}
	used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL);
	if (!used_objects) {
		kmem_cache_free(c, buf);
		kfree(buf_contents);
		goto out;
	}
	memcpy(buf_contents, buf, size);
	kmem_cache_free(c, buf);
	/*
	 * Run for a fixed number of iterations. If we never hit saved_ptr,
	 * assume the test passes.
	 */
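	/*
	 * Hold on to every object we get back so that fresh objects keep
	 * being allocated until saved_ptr is reused; they are all freed
	 * before returning.
	 */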
	for (iter = 0; iter < maxiter; iter++) {
		buf = kmem_cache_alloc(c, GFP_KERNEL);
		used_objects[iter] = buf;
		if (buf == saved_ptr) {
			fail = memcmp(buf_contents, buf, size);
			for (i = 0; i <= iter; i++)
				kmem_cache_free(c, used_objects[i]);
			goto free_out;
		}
	}

	for (iter = 0; iter < maxiter; iter++)
		kmem_cache_free(c, used_objects[iter]);

free_out:
	kfree(buf_contents);
	kfree(used_objects);
out:
	kmem_cache_destroy(c);
	*total_failures += fail;
	return 1;
}

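/*
 * Check that objects returned by kmem_cache_alloc_bulk() contain no stale
 * (non-zero) bytes.
 */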
static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
{
	struct kmem_cache *c;
	int i, iter, maxiter = 1024;
	int num, bytes;
	bool fail = false;
	void *objects[10];

	c = kmem_cache_create("test_cache", size, size, 0, NULL);
	for (iter = 0; (iter < maxiter) && !fail; iter++) {
		num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
					    objects);
		for (i = 0; i < num; i++) {
			bytes = count_nonzero_bytes(objects[i], size);
			if (bytes)
				fail = true;
			fill_with_garbage(objects[i], size);
		}

		if (num)
			kmem_cache_free_bulk(c, num, objects);
	}
	kmem_cache_destroy(c);
	*total_failures += fail;
	return 1;
}

/*
 * Test kmem_cache allocation by creating caches of different sizes, with and
 * without constructors, with and without SLAB_TYPESAFE_BY_RCU.
 */
static int __init test_kmemcache(int *total_failures)
{
	int failures = 0, num_tests = 0;
	int i, flags, size;
	bool ctor, rcu, zero;

	for (i = 0; i < 10; i++) {
		size = 8 << i;
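		/*
		 * Walk all combinations of (ctor, rcu, zero). Constructors and
		 * __GFP_ZERO are mutually exclusive here, so skip that pair.
		 */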
		for (flags = 0; flags < 8; flags++) {
			ctor = flags & 1;
			rcu = flags & 2;
			zero = flags & 4;
			if (ctor & zero)
				continue;
			num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
							&failures);
		}
		num_tests += do_kmem_cache_size_bulk(size, &failures);
	}
	REPORT_FAILURES_IN_FN();
	*total_failures += failures;
	return num_tests;
}

/* Test the behavior of SLAB_TYPESAFE_BY_RCU caches of different sizes. */
static int __init test_rcu_persistent(int *total_failures)
{
	int failures = 0, num_tests = 0;
	int i, size;

	for (i = 0; i < 10; i++) {
		size = 8 << i;
		num_tests += do_kmem_cache_rcu_persistent(size, &failures);
	}
	REPORT_FAILURES_IN_FN();
	*total_failures += failures;
	return num_tests;
}

/*
 * Run the tests. Each test function returns the number of executed tests and
 * updates |failures| with the number of failed tests.
 */
static int __init test_meminit_init(void)
{
	int failures = 0, num_tests = 0;

	num_tests += test_pages(&failures);
	num_tests += test_kvmalloc(&failures);
	num_tests += test_kmemcache(&failures);
	num_tests += test_rcu_persistent(&failures);

	if (failures == 0)
		pr_info("all %d tests passed!\n", num_tests);
	else
		pr_info("failures: %d out of %d\n", failures, num_tests);

	return failures ? -EINVAL : 0;
}
module_init(test_meminit_init);

MODULE_LICENSE("GPL");