// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
 */

#include <kunit/test.h>

#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>

#include <drm/drm_buddy.h>

#include "../lib/drm_random.h"

#define TIMEOUT(name__) \
	unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT

static unsigned int random_seed;

static inline u64 get_size(int order, u64 chunk_size)
{
	return (1 << order) * chunk_size;
}

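/*
 * Poll-style timeout check used by the smoke test: returns false while the
 * deadline has not passed and no signal is pending, true otherwise. An
 * optional printk-style message is emitted when the timeout fires.
 */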
__printf(2, 3)
static bool __timeout(unsigned long timeout, const char *fmt, ...)
{
	va_list va;

	if (!signal_pending(current)) {
		cond_resched();
		if (time_before(jiffies, timeout))
			return false;
	}

	if (fmt) {
		va_start(va, fmt);
		vprintk(fmt, va);
		va_end(va);
	}

	return true;
}

static void __dump_block(struct kunit *test, struct drm_buddy *mm,
			 struct drm_buddy_block *block, bool buddy)
{
	kunit_err(test, "block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%d buddy=%d\n",
		  block->header, drm_buddy_block_state(block),
		  drm_buddy_block_order(block), drm_buddy_block_offset(block),
		  drm_buddy_block_size(mm, block), !block->parent, buddy);
}

static void dump_block(struct kunit *test, struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	struct drm_buddy_block *buddy;

	__dump_block(test, mm, block, false);

	buddy = drm_get_buddy(block);
	if (buddy)
		__dump_block(test, mm, buddy, true);
}

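/*
 * Sanity-check a single block: state, size and offset alignment against
 * mm->chunk_size, and consistency with its buddy (mirrored offset, matching
 * size, and not both free at once). Returns 0 or -EINVAL.
 */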
static int check_block(struct kunit *test, struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	struct drm_buddy_block *buddy;
	unsigned int block_state;
	u64 block_size;
	u64 offset;
	int err = 0;

	block_state = drm_buddy_block_state(block);

	if (block_state != DRM_BUDDY_ALLOCATED &&
	    block_state != DRM_BUDDY_FREE && block_state != DRM_BUDDY_SPLIT) {
		kunit_err(test, "block state mismatch\n");
		err = -EINVAL;
	}

	block_size = drm_buddy_block_size(mm, block);
	offset = drm_buddy_block_offset(block);

	if (block_size < mm->chunk_size) {
		kunit_err(test, "block size smaller than min size\n");
		err = -EINVAL;
	}

	/* We can't use is_power_of_2() for a u64 on 32-bit systems. */
	if (block_size & (block_size - 1)) {
		kunit_err(test, "block size not power of two\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(block_size, mm->chunk_size)) {
		kunit_err(test, "block size not aligned to min size\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(offset, mm->chunk_size)) {
		kunit_err(test, "block offset not aligned to min size\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(offset, block_size)) {
		kunit_err(test, "block offset not aligned to block size\n");
		err = -EINVAL;
	}

	buddy = drm_get_buddy(block);

	if (!buddy && block->parent) {
		kunit_err(test, "buddy has gone fishing\n");
		err = -EINVAL;
	}

	if (buddy) {
		if (drm_buddy_block_offset(buddy) != (offset ^ block_size)) {
			kunit_err(test, "buddy has wrong offset\n");
			err = -EINVAL;
		}

		if (drm_buddy_block_size(mm, buddy) != block_size) {
			kunit_err(test, "buddy size mismatch\n");
			err = -EINVAL;
		}

		if (drm_buddy_block_state(buddy) == block_state &&
		    block_state == DRM_BUDDY_FREE) {
			kunit_err(test, "block and its buddy are free\n");
			err = -EINVAL;
		}
	}

	return err;
}

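/*
 * Validate a list of allocated blocks: each block must pass check_block(),
 * be in the allocated state and, when @is_contiguous, immediately follow the
 * previous block. The summed size must equal @expected_size. On failure the
 * offending block (and its predecessor) is dumped.
 */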
static int check_blocks(struct kunit *test, struct drm_buddy *mm,
			struct list_head *blocks, u64 expected_size, bool is_contiguous)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *prev;
	u64 total;
	int err = 0;

	block = NULL;
	prev = NULL;
	total = 0;

	list_for_each_entry(block, blocks, link) {
		err = check_block(test, mm, block);

		if (!drm_buddy_block_is_allocated(block)) {
			kunit_err(test, "block not allocated\n");
			err = -EINVAL;
		}

		if (is_contiguous && prev) {
			u64 prev_block_size;
			u64 prev_offset;
			u64 offset;

			prev_offset = drm_buddy_block_offset(prev);
			prev_block_size = drm_buddy_block_size(mm, prev);
			offset = drm_buddy_block_offset(block);

			if (offset != (prev_offset + prev_block_size)) {
				kunit_err(test, "block offset mismatch\n");
				err = -EINVAL;
			}
		}

		if (err)
			break;

		total += drm_buddy_block_size(mm, block);
		prev = block;
	}

	if (!err) {
		if (total != expected_size) {
			kunit_err(test, "size mismatch, expected=%llx, found=%llx\n",
				  expected_size, total);
			err = -EINVAL;
		}
		return err;
	}

	if (prev) {
		kunit_err(test, "prev block, dump:\n");
		dump_block(test, mm, prev);
	}

	kunit_err(test, "bad block, dump:\n");
	dump_block(test, mm, block);

	return err;
}

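/*
 * Verify the mm as a whole in its fully-free state: one root per set bit of
 * mm->size, each root free and valid, the roots laid out contiguously with
 * the max-order root first, each root at the head of its free list, and the
 * root sizes summing to mm->size.
 */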
static int check_mm(struct kunit *test, struct drm_buddy *mm)
{
	struct drm_buddy_block *root;
	struct drm_buddy_block *prev;
	unsigned int i;
	u64 total;
	int err = 0;

	if (!mm->n_roots) {
		kunit_err(test, "n_roots is zero\n");
		return -EINVAL;
	}

	if (mm->n_roots != hweight64(mm->size)) {
		kunit_err(test, "n_roots mismatch, n_roots=%u, expected=%lu\n",
			  mm->n_roots, hweight64(mm->size));
		return -EINVAL;
	}

	root = NULL;
	prev = NULL;
	total = 0;

	for (i = 0; i < mm->n_roots; ++i) {
		struct drm_buddy_block *block;
		unsigned int order;

		root = mm->roots[i];
		if (!root) {
			kunit_err(test, "root(%u) is NULL\n", i);
			err = -EINVAL;
			break;
		}

		err = check_block(test, mm, root);

		if (!drm_buddy_block_is_free(root)) {
			kunit_err(test, "root not free\n");
			err = -EINVAL;
		}

		order = drm_buddy_block_order(root);

		if (!i) {
			if (order != mm->max_order) {
				kunit_err(test, "max order root missing\n");
				err = -EINVAL;
			}
		}

		if (prev) {
			u64 prev_block_size;
			u64 prev_offset;
			u64 offset;

			prev_offset = drm_buddy_block_offset(prev);
			prev_block_size = drm_buddy_block_size(mm, prev);
			offset = drm_buddy_block_offset(root);

			if (offset != (prev_offset + prev_block_size)) {
				kunit_err(test, "root offset mismatch\n");
				err = -EINVAL;
			}
		}

		block = list_first_entry_or_null(&mm->free_list[order],
						 struct drm_buddy_block, link);
		if (block != root) {
			kunit_err(test, "root mismatch at order=%u\n", order);
			err = -EINVAL;
		}

		if (err)
			break;

		prev = root;
		total += drm_buddy_block_size(mm, root);
	}

	if (!err) {
		if (total != mm->size) {
			kunit_err(test, "expected mm size=%llx, found=%llx\n",
				  mm->size, total);
			err = -EINVAL;
		}
		return err;
	}

	if (prev) {
		kunit_err(test, "prev root(%u), dump:\n", i - 1);
		dump_block(test, mm, prev);
	}

	if (root) {
		kunit_err(test, "bad root(%u), dump:\n", i);
		dump_block(test, mm, root);
	}

	return err;
}

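/*
 * Pick a pseudo-random address-space size and chunk size for the randomised
 * tests: up to 8 GiB worth of 4 KiB pages, with the chunk size a power of two
 * less than the size and the size rounded down to a chunk multiple.
 */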
static void mm_config(u64 *size, u64 *chunk_size)
{
	DRM_RND_STATE(prng, random_seed);
	u32 s, ms;

	/* Nothing fancy, just try to get an interesting bit pattern */

	prandom_seed_state(&prng, random_seed);

	/* Let size be a random number of pages up to 8 GB (2M pages) */
	s = 1 + drm_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
	/* Let the chunk size be a random power of 2 less than size */
	ms = BIT(drm_prandom_u32_max_state(ilog2(s), &prng));
	/* Round size down to the chunk size */
	s &= -ms;

	/* Convert from pages to bytes */
	*chunk_size = (u64)ms << 12;
	*size = (u64)s << 12;
}

static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	const int max_order = 3;
	unsigned long flags = 0;
	int order, top;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(holes);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left. Free the largest block, then whittle down again.
	 * Eventually we will have a fully 50% fragmented mm.
	 */

	mm_size = PAGE_SIZE << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			drm_buddy_free_block(&mm, block);
		}

		for (order = top; order--;) {
			size = get_size(order, PAGE_SIZE);
			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
									    mm_size, size, size,
									    &tmp, flags),
					       "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					       order, top);

			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

			list_move_tail(&block->link, &blocks);
		}

		/* There should be one final page for this sub-allocation */
		size = get_size(0, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM for hole\n");

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &holes);

		size = get_size(top, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				      top, max_order);
	}

	drm_buddy_free_list(&mm, &holes);

	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				      order);
	}

	list_splice_tail(&holes, &blocks);
	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}

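/*
 * Randomised smoke test: visit every possible starting order in random
 * order and, for each, keep allocating blocks of that order, dropping to
 * the next lower order when an allocation fails, until the whole mm has
 * been consumed (or the timeout fires). The allocations are then verified
 * with check_blocks() and freed, and the mm is re-checked afterwards.
 */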
static void drm_test_buddy_alloc_smoke(struct kunit *test)
{
	u64 mm_size, chunk_size, start = 0;
	unsigned long flags = 0;
	struct drm_buddy mm;
	int *order;
	int i;

	DRM_RND_STATE(prng, random_seed);
	TIMEOUT(end_time);

	mm_config(&mm_size, &chunk_size);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, chunk_size),
			       "buddy_init failed\n");

	order = drm_random_order(mm.max_order + 1, &prng);
	KUNIT_ASSERT_TRUE(test, order);

	for (i = 0; i <= mm.max_order; ++i) {
		struct drm_buddy_block *block;
		int max_order = order[i];
		bool timeout = false;
		LIST_HEAD(blocks);
		u64 total, size;
		LIST_HEAD(tmp);
		int order, err;

		KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
				       "pre-mm check failed, abort\n");

		order = max_order;
		total = 0;

		do {
retry:
			size = get_size(order, chunk_size);
			err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags);
			if (err) {
				if (err == -ENOMEM) {
					KUNIT_FAIL(test, "buddy_alloc hit -ENOMEM with order=%d\n",
						   order);
				} else {
					if (order--) {
						err = 0;
						goto retry;
					}

					KUNIT_FAIL(test, "buddy_alloc with order=%d failed\n",
						   order);
				}

				break;
			}

			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

			list_move_tail(&block->link, &blocks);
			KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), order,
					    "buddy_alloc order mismatch\n");

			total += drm_buddy_block_size(&mm, block);

			if (__timeout(end_time, NULL)) {
				timeout = true;
				break;
			}
		} while (total < mm.size);

		if (!err)
			err = check_blocks(test, &mm, &blocks, total, false);

		drm_buddy_free_list(&mm, &blocks);

		if (!err) {
			KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm),
					       "post-mm check failed\n");
		}

		if (err || timeout)
			break;

		cond_resched();
	}

	kfree(order);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block, *bn;
	const unsigned int max_order = 16;
	unsigned long flags = 0;
	struct drm_buddy mm;
	unsigned int order;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
	 */

	mm_size = PAGE_SIZE << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order < max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* And now the last remaining block available */
	size = get_size(0, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc hit -ENOMEM on final alloc\n");

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_move_tail(&block->link, &blocks);

	/* Should be completely full! */
	for (order = max_order; order--;) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded, it should be full!");
	}

	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	drm_buddy_free_block(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);

		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_del(&block->link);
		drm_buddy_free_block(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	size = get_size(max_order, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			       max_order);

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_del(&block->link);
	drm_buddy_free_block(&mm, block);
	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_optimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	const int max_order = 16;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);
	int order;

	/*
	 * Create a mm with one block of each order available, and
	 * try to allocate them all.
	 */

	mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order <= max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* Should be completely full! */
	size = get_size(0, PAGE_SIZE);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							   size, size, &tmp, flags),
			      "buddy_alloc unexpectedly succeeded, it should be full!");

	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}

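/*
 * Range allocation test: with a random mm configuration, carve the address
 * space into consecutive ranges of a prime number of chunks each using
 * DRM_BUDDY_RANGE_ALLOCATION, verifying that every returned range starts at
 * the expected offset and is contiguous, until the whole mm is used up.
 */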
static void drm_test_buddy_alloc_range(struct kunit *test)
{
	unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION;
	u64 offset, size, rem, chunk_size, end;
	unsigned long page_num;
	struct drm_buddy mm;
	LIST_HEAD(blocks);

	mm_config(&size, &chunk_size);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, size, chunk_size),
			       "buddy_init failed");

	KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
			       "pre-mm check failed, abort!");

	rem = mm.size;
	offset = 0;

	for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
		struct drm_buddy_block *block;
		LIST_HEAD(tmp);

		size = min(page_num * mm.chunk_size, rem);
		end = offset + size;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, offset, end,
								    size, mm.chunk_size,
								    &tmp, flags),
				       "alloc_range with offset=%llx, size=%llx failed\n", offset, size);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_range has no blocks\n");

		KUNIT_ASSERT_EQ_MSG(test, drm_buddy_block_offset(block), offset,
				    "alloc_range start offset mismatch, found=%llx, expected=%llx\n",
				    drm_buddy_block_offset(block), offset);

		KUNIT_ASSERT_FALSE(test, check_blocks(test, &mm, &tmp, size, true));

		list_splice_tail(&tmp, &blocks);

		offset += size;

		rem -= size;
		if (!rem)
			break;

		cond_resched();
	}

	drm_buddy_free_list(&mm, &blocks);

	KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm), "post-mm check failed\n");

	drm_buddy_fini(&mm);
}

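/*
 * Limit test: initialise the mm with a U64_MAX size and check that the
 * maximum order is clamped to DRM_BUDDY_MAX_ORDER, then allocate and verify
 * a single block of that maximum order.
 */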
static void drm_test_buddy_alloc_limit(struct kunit *test)
{
	u64 size = U64_MAX, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	LIST_HEAD(allocated);
	struct drm_buddy mm;

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));

	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
			    "mm.max_order(%d) != %d\n", mm.max_order,
			    DRM_BUDDY_MAX_ORDER);

	size = mm.chunk_size << mm.max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
							PAGE_SIZE, &allocated, flags));

	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
	KUNIT_EXPECT_TRUE(test, block);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
			    "block order(%d) != %d\n",
			    drm_buddy_block_order(block), mm.max_order);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * PAGE_SIZE,
			    "block size(%llu) != %llu\n",
			    drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * PAGE_SIZE);

	drm_buddy_free_list(&mm, &allocated);
	drm_buddy_fini(&mm);
}

static int drm_buddy_suite_init(struct kunit_suite *suite)
{
	while (!random_seed)
		random_seed = get_random_u32();

	kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n", random_seed);

	return 0;
}

static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_range),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_smoke),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	{}
};

static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.suite_init = drm_buddy_suite_init,
	.test_cases = drm_buddy_tests,
};

kunit_test_suite(drm_buddy_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");