// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

static struct kmem_cache *slab_blocks;

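/*
 * Allocate a block descriptor from the slab cache. The offset, order and
 * state bits all share the block header, while the parent pointer is kept
 * separately so buddies can be merged again on free.
 */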
static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
                                               struct drm_buddy_block *parent,
                                               unsigned int order,
                                               u64 offset)
{
        struct drm_buddy_block *block;

        BUG_ON(order > DRM_BUDDY_MAX_ORDER);

        block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
        if (!block)
                return NULL;

        block->header = offset;
        block->header |= order;
        block->parent = parent;

        BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
        return block;
}

static void drm_block_free(struct drm_buddy *mm,
                           struct drm_buddy_block *block)
{
        kmem_cache_free(slab_blocks, block);
}

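/*
 * Insert a free block into the free list for its order, keeping the list
 * sorted by block offset; list_last_entry() on a free list then yields the
 * free block with the highest start address.
 */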
static void list_insert_sorted(struct drm_buddy *mm,
                               struct drm_buddy_block *block)
{
        struct drm_buddy_block *node;
        struct list_head *head;

        head = &mm->free_list[drm_buddy_block_order(block)];
        if (list_empty(head)) {
                list_add(&block->link, head);
                return;
        }

        list_for_each_entry(node, head, link)
                if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
                        break;

        __list_add(&block->link, node->link.prev, &node->link);
}

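/*
 * The mark_* helpers update the state bits in the block header and move the
 * block on or off the free lists: allocated and split blocks leave the free
 * list, free blocks are (re)inserted in offset order.
 */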
static void mark_allocated(struct drm_buddy_block *block)
{
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_ALLOCATED;

        list_del(&block->link);
}

static void mark_free(struct drm_buddy *mm,
                      struct drm_buddy_block *block)
{
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_FREE;

        list_insert_sorted(mm, block);
}

static void mark_split(struct drm_buddy_block *block)
{
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_SPLIT;

        list_del(&block->link);
}

/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
        unsigned int i;
        u64 offset;

        if (size < chunk_size)
                return -EINVAL;

        if (chunk_size < PAGE_SIZE)
                return -EINVAL;

        if (!is_power_of_2(chunk_size))
                return -EINVAL;

        size = round_down(size, chunk_size);

        mm->size = size;
        mm->avail = size;
        mm->chunk_size = chunk_size;
        mm->max_order = ilog2(size) - ilog2(chunk_size);

        BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

        mm->free_list = kmalloc_array(mm->max_order + 1,
                                      sizeof(struct list_head),
                                      GFP_KERNEL);
        if (!mm->free_list)
                return -ENOMEM;

        for (i = 0; i <= mm->max_order; ++i)
                INIT_LIST_HEAD(&mm->free_list[i]);

        mm->n_roots = hweight64(size);

        mm->roots = kmalloc_array(mm->n_roots,
                                  sizeof(struct drm_buddy_block *),
                                  GFP_KERNEL);
        if (!mm->roots)
                goto out_free_list;

        offset = 0;
        i = 0;

        /*
         * Split into power-of-two blocks, in case we are given a size that is
         * not itself a power-of-two.
         */
        do {
                struct drm_buddy_block *root;
                unsigned int order;
                u64 root_size;

                order = ilog2(size) - ilog2(chunk_size);
                root_size = chunk_size << order;

                root = drm_block_alloc(mm, NULL, order, offset);
                if (!root)
                        goto out_free_roots;

                mark_free(mm, root);

                BUG_ON(i > mm->max_order);
                BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

                mm->roots[i] = root;

                offset += root_size;
                size -= root_size;
                i++;
        } while (size);

        return 0;

out_free_roots:
        while (i--)
                drm_block_free(mm, mm->roots[i]);
        kfree(mm->roots);
out_free_list:
        kfree(mm->free_list);
        return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);

/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Clean up memory manager resources and the freelist.
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
        int i;

        for (i = 0; i < mm->n_roots; ++i) {
                WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
                drm_block_free(mm, mm->roots[i]);
        }

        WARN_ON(mm->avail != mm->size);

        kfree(mm->roots);
        kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);

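/*
 * Split a free block into two buddies of half the size. Both halves are
 * placed on the free list of the lower order and the parent is marked as
 * split so it can be recombined once both children are free again.
 */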
static int split_block(struct drm_buddy *mm,
                       struct drm_buddy_block *block)
{
        unsigned int block_order = drm_buddy_block_order(block) - 1;
        u64 offset = drm_buddy_block_offset(block);

        BUG_ON(!drm_buddy_block_is_free(block));
        BUG_ON(!drm_buddy_block_order(block));

        block->left = drm_block_alloc(mm, block, block_order, offset);
        if (!block->left)
                return -ENOMEM;

        block->right = drm_block_alloc(mm, block, block_order,
                                       offset + (mm->chunk_size << block_order));
        if (!block->right) {
                drm_block_free(mm, block->left);
                return -ENOMEM;
        }

        mark_free(mm, block->left);
        mark_free(mm, block->right);

        mark_split(block);

        return 0;
}

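/*
 * Return the sibling that shares @block's parent, or NULL if @block is a
 * root and therefore has no buddy to merge with.
 */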
static struct drm_buddy_block *
__get_buddy(struct drm_buddy_block *block)
{
        struct drm_buddy_block *parent;

        parent = block->parent;
        if (!parent)
                return NULL;

        if (parent->left == block)
                return parent->right;

        return parent->left;
}

/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
        return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);

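/*
 * Free a block back into the tree: while the buddy of the current block is
 * also free, fold both halves back into their parent and continue merging
 * one level up, then put the resulting block on the free list.
 */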
static void __drm_buddy_free(struct drm_buddy *mm,
                             struct drm_buddy_block *block)
{
        struct drm_buddy_block *parent;

        while ((parent = block->parent)) {
                struct drm_buddy_block *buddy;

                buddy = __get_buddy(block);

                if (!drm_buddy_block_is_free(buddy))
                        break;

                list_del(&buddy->link);

                drm_block_free(mm, block);
                drm_block_free(mm, buddy);

                block = parent;
        }

        mark_free(mm, block);
}

/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
                          struct drm_buddy_block *block)
{
        BUG_ON(!drm_buddy_block_is_allocated(block));
        mm->avail += drm_buddy_block_size(mm, block);
        __drm_buddy_free(mm, block);
}
EXPORT_SYMBOL(drm_buddy_free_block);

/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks
 */
void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
{
        struct drm_buddy_block *block, *on;

        list_for_each_entry_safe(block, on, objects, link) {
                drm_buddy_free_block(mm, block);
                cond_resched();
        }
        INIT_LIST_HEAD(objects);
}
EXPORT_SYMBOL(drm_buddy_free_list);

static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
        return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
        return s1 <= s2 && e1 >= e2;
}

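/*
 * Depth-first search from the roots for a free block of exactly @order that
 * lies entirely within [start, end). Blocks that merely straddle the range
 * are split further; blocks that can never yield a suitably aligned chunk
 * inside the range are skipped early.
 */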
static struct drm_buddy_block *
alloc_range_bias(struct drm_buddy *mm,
                 u64 start, u64 end,
                 unsigned int order)
{
        u64 req_size = mm->chunk_size << order;
        struct drm_buddy_block *block;
        struct drm_buddy_block *buddy;
        LIST_HEAD(dfs);
        int err;
        int i;

        end = end - 1;

        for (i = 0; i < mm->n_roots; ++i)
                list_add_tail(&mm->roots[i]->tmp_link, &dfs);

        do {
                u64 block_start;
                u64 block_end;

                block = list_first_entry_or_null(&dfs,
                                                 struct drm_buddy_block,
                                                 tmp_link);
                if (!block)
                        break;

                list_del(&block->tmp_link);

                if (drm_buddy_block_order(block) < order)
                        continue;

                block_start = drm_buddy_block_offset(block);
                block_end = block_start + drm_buddy_block_size(mm, block) - 1;

                if (!overlaps(start, end, block_start, block_end))
                        continue;

                if (drm_buddy_block_is_allocated(block))
                        continue;

                if (block_start < start || block_end > end) {
                        u64 adjusted_start = max(block_start, start);
                        u64 adjusted_end = min(block_end, end);

                        if (round_down(adjusted_end + 1, req_size) <=
                            round_up(adjusted_start, req_size))
                                continue;
                }

                if (contains(start, end, block_start, block_end) &&
                    order == drm_buddy_block_order(block)) {
                        /*
                         * Find the free block within the range.
                         */
                        if (drm_buddy_block_is_free(block))
                                return block;

                        continue;
                }

                if (!drm_buddy_block_is_split(block)) {
                        err = split_block(mm, block);
                        if (unlikely(err))
                                goto err_undo;
                }

                list_add(&block->right->tmp_link, &dfs);
                list_add(&block->left->tmp_link, &dfs);
        } while (1);

        return ERR_PTR(-ENOSPC);

err_undo:
        /*
         * We really don't want to leave around a bunch of split blocks, since
         * bigger is better, so make sure we merge everything back before we
         * free the allocated blocks.
         */
        buddy = __get_buddy(block);
        if (buddy &&
            (drm_buddy_block_is_free(block) &&
             drm_buddy_block_is_free(buddy)))
                __drm_buddy_free(mm, block);
        return ERR_PTR(err);
}

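/*
 * For top-down allocation: scan every free list from @order upwards and
 * return the free block with the highest start offset, so allocations are
 * biased towards the end of the address space.
 */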
static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm, unsigned int order)
{
        struct drm_buddy_block *max_block = NULL, *node;
        unsigned int i;

        for (i = order; i <= mm->max_order; ++i) {
                if (!list_empty(&mm->free_list[i])) {
                        node = list_last_entry(&mm->free_list[i],
                                               struct drm_buddy_block,
                                               link);
                        if (!max_block) {
                                max_block = node;
                                continue;
                        }

                        if (drm_buddy_block_offset(node) >
                            drm_buddy_block_offset(max_block)) {
                                max_block = node;
                        }
                }
        }

        return max_block;
}

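/*
 * Take a free block of at least @order straight from the free lists (or from
 * get_maxblock() for top-down requests) and split it down until it matches
 * the requested order, descending into the right-hand child each time.
 */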
static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
                    unsigned int order,
                    unsigned long flags)
{
        struct drm_buddy_block *block = NULL;
        unsigned int tmp;
        int err;

        if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
                block = get_maxblock(mm, order);
                if (block)
                        /* Store the obtained block order */
                        tmp = drm_buddy_block_order(block);
        } else {
                for (tmp = order; tmp <= mm->max_order; ++tmp) {
                        if (!list_empty(&mm->free_list[tmp])) {
                                block = list_last_entry(&mm->free_list[tmp],
                                                        struct drm_buddy_block,
                                                        link);
                                if (block)
                                        break;
                        }
                }
        }

        if (!block)
                return ERR_PTR(-ENOSPC);

        BUG_ON(!drm_buddy_block_is_free(block));

        while (tmp != order) {
                err = split_block(mm, block);
                if (unlikely(err))
                        goto err_undo;

                block = block->right;
                tmp--;
        }
        return block;

err_undo:
        if (tmp != order)
                __drm_buddy_free(mm, block);
        return ERR_PTR(err);
}

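/*
 * Consume the DFS list in @dfs and allocate every block that falls inside
 * [start, start + size): fully contained free blocks are claimed as-is,
 * partially overlapping blocks are split and their halves revisited. On
 * failure, everything already claimed is handed back via the free list.
 */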
static int __alloc_range(struct drm_buddy *mm,
                         struct list_head *dfs,
                         u64 start, u64 size,
                         struct list_head *blocks)
{
        struct drm_buddy_block *block;
        struct drm_buddy_block *buddy;
        LIST_HEAD(allocated);
        u64 end;
        int err;

        end = start + size - 1;

        do {
                u64 block_start;
                u64 block_end;

                block = list_first_entry_or_null(dfs,
                                                 struct drm_buddy_block,
                                                 tmp_link);
                if (!block)
                        break;

                list_del(&block->tmp_link);

                block_start = drm_buddy_block_offset(block);
                block_end = block_start + drm_buddy_block_size(mm, block) - 1;

                if (!overlaps(start, end, block_start, block_end))
                        continue;

                if (drm_buddy_block_is_allocated(block)) {
                        err = -ENOSPC;
                        goto err_free;
                }

                if (contains(start, end, block_start, block_end)) {
                        if (!drm_buddy_block_is_free(block)) {
                                err = -ENOSPC;
                                goto err_free;
                        }

                        mark_allocated(block);
                        mm->avail -= drm_buddy_block_size(mm, block);
                        list_add_tail(&block->link, &allocated);
                        continue;
                }

                if (!drm_buddy_block_is_split(block)) {
                        err = split_block(mm, block);
                        if (unlikely(err))
                                goto err_undo;
                }

                list_add(&block->right->tmp_link, dfs);
                list_add(&block->left->tmp_link, dfs);
        } while (1);

        list_splice_tail(&allocated, blocks);
        return 0;

err_undo:
        /*
         * We really don't want to leave around a bunch of split blocks, since
         * bigger is better, so make sure we merge everything back before we
         * free the allocated blocks.
         */
        buddy = __get_buddy(block);
        if (buddy &&
            (drm_buddy_block_is_free(block) &&
             drm_buddy_block_is_free(buddy)))
                __drm_buddy_free(mm, block);

err_free:
        drm_buddy_free_list(mm, &allocated);
        return err;
}

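/*
 * Seed the DFS list with every root block and allocate the exact range
 * [start, start + size) on top of it.
 */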
static int __drm_buddy_alloc_range(struct drm_buddy *mm,
                                   u64 start,
                                   u64 size,
                                   struct list_head *blocks)
{
        LIST_HEAD(dfs);
        int i;

        for (i = 0; i < mm->n_roots; ++i)
                list_add_tail(&mm->roots[i]->tmp_link, &dfs);

        return __alloc_range(mm, &dfs, start, size, blocks);
}

/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain a single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order
 *
 * For contiguous allocation, we round up the size to the nearest
 * power of two value; since drivers consume the *actual* size, the
 * remaining portions are unused and can optionally be freed with
 * this function.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
                         u64 new_size,
                         struct list_head *blocks)
{
        struct drm_buddy_block *parent;
        struct drm_buddy_block *block;
        LIST_HEAD(dfs);
        u64 new_start;
        int err;

        if (!list_is_singular(blocks))
                return -EINVAL;

        block = list_first_entry(blocks,
                                 struct drm_buddy_block,
                                 link);

        if (WARN_ON(!drm_buddy_block_is_allocated(block)))
                return -EINVAL;

        if (new_size > drm_buddy_block_size(mm, block))
                return -EINVAL;

        if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
                return -EINVAL;

        if (new_size == drm_buddy_block_size(mm, block))
                return 0;

        list_del(&block->link);
        mark_free(mm, block);
        mm->avail += drm_buddy_block_size(mm, block);

        /* Prevent recursively freeing this node */
        parent = block->parent;
        block->parent = NULL;

        new_start = drm_buddy_block_offset(block);
        list_add(&block->tmp_link, &dfs);
        err = __alloc_range(mm, &dfs, new_start, new_size, blocks);
        if (err) {
                mark_allocated(block);
                mm->avail -= drm_buddy_block_size(mm, block);
                list_add(&block->link, blocks);
        }

        block->parent = parent;
        return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);

/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation
 * @min_page_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() is called when there are range limitations; it
 * traverses the tree and returns the desired block.
 *
 * alloc_from_freelist() is called when *no* range restrictions
 * are enforced; it picks the block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
                           u64 start, u64 end, u64 size,
                           u64 min_page_size,
                           struct list_head *blocks,
                           unsigned long flags)
{
        struct drm_buddy_block *block = NULL;
        unsigned int min_order, order;
        unsigned long pages;
        LIST_HEAD(allocated);
        int err;

        if (size < mm->chunk_size)
                return -EINVAL;

        if (min_page_size < mm->chunk_size)
                return -EINVAL;

        if (!is_power_of_2(min_page_size))
                return -EINVAL;

        if (!IS_ALIGNED(start | end | size, mm->chunk_size))
                return -EINVAL;

        if (end > mm->size)
                return -EINVAL;

        if (range_overflows(start, size, mm->size))
                return -EINVAL;

        /* Actual range allocation */
        if (start + size == end)
                return __drm_buddy_alloc_range(mm, start, size, blocks);

        if (!IS_ALIGNED(size, min_page_size))
                return -EINVAL;

        pages = size >> ilog2(mm->chunk_size);
        order = fls(pages) - 1;
        min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);

        do {
                order = min(order, (unsigned int)fls(pages) - 1);
                BUG_ON(order > mm->max_order);
                BUG_ON(order < min_order);

                do {
                        if (flags & DRM_BUDDY_RANGE_ALLOCATION)
                                /* Allocate traversing within the range */
                                block = alloc_range_bias(mm, start, end, order);
                        else
                                /* Allocate from freelist */
                                block = alloc_from_freelist(mm, order, flags);

                        if (!IS_ERR(block))
                                break;

                        if (order-- == min_order) {
                                err = -ENOSPC;
                                goto err_free;
                        }
                } while (1);

                mark_allocated(block);
                mm->avail -= drm_buddy_block_size(mm, block);
                kmemleak_update_trace(block);
                list_add_tail(&block->link, &allocated);

                pages -= BIT(order);

                if (!pages)
                        break;
        } while (1);

        list_splice_tail(&allocated, blocks);
        return 0;

err_free:
        drm_buddy_free_list(mm, &allocated);
        return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);

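/*
 * Example (illustrative sketch only, not taken from any in-tree driver): a
 * typical caller initialises the manager once, allocates blocks into a local
 * list and frees the whole list when done. The sizes below are made up for
 * the example.
 *
 *	struct drm_buddy mm;
 *	LIST_HEAD(blocks);
 *	int err;
 *
 *	err = drm_buddy_init(&mm, SZ_4G, SZ_4K);
 *	if (err)
 *		return err;
 *
 *	err = drm_buddy_alloc_blocks(&mm, 0, SZ_4G, SZ_8M, SZ_4K,
 *				     &blocks, 0);
 *	if (!err)
 *		drm_buddy_free_list(&mm, &blocks);
 *
 *	drm_buddy_fini(&mm);
 */
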
/**
 * drm_buddy_block_print - print block information
 *
 * @mm: DRM buddy manager
 * @block: DRM buddy block
 * @p: DRM printer to use
 */
void drm_buddy_block_print(struct drm_buddy *mm,
                           struct drm_buddy_block *block,
                           struct drm_printer *p)
{
        u64 start = drm_buddy_block_offset(block);
        u64 size = drm_buddy_block_size(mm, block);

        drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);

/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
        int order;

        drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
                   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);

        for (order = mm->max_order; order >= 0; order--) {
                struct drm_buddy_block *block;
                u64 count = 0, free;

                list_for_each_entry(block, &mm->free_list[order], link) {
                        BUG_ON(!drm_buddy_block_is_free(block));
                        count++;
                }

                drm_printf(p, "order-%2d ", order);

                free = count * (mm->chunk_size << order);
                if (free < SZ_1M)
                        drm_printf(p, "free: %8llu KiB", free >> 10);
                else
                        drm_printf(p, "free: %8llu MiB", free >> 20);

                drm_printf(p, ", blocks: %llu\n", count);
        }
}
EXPORT_SYMBOL(drm_buddy_print);

static void drm_buddy_module_exit(void)
{
        kmem_cache_destroy(slab_blocks);
}

static int __init drm_buddy_module_init(void)
{
        slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
        if (!slab_blocks)
                return -ENOMEM;

        return 0;
}

module_init(drm_buddy_module_init);
module_exit(drm_buddy_module_exit);

MODULE_DESCRIPTION("DRM Buddy Allocator");
MODULE_LICENSE("Dual MIT/GPL");