/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

/*
 * Allocate a struct drm_mm_node. If the kzalloc fails (which is likely in
 * atomic context), fall back to the pool of nodes pre-allocated on
 * mm->unused_nodes by drm_mm_pre_get().
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
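
/*
 * A minimal usage sketch, not from the original file: pre-allocate nodes
 * while sleeping is still allowed, then carve out a block atomically. The
 * dev_priv fields, the lock and size/alignment are hypothetical driver
 * state; drm_mm_get_block_atomic() is the atomic inline wrapper from
 * drm_mm.h around drm_mm_get_block_generic().
 *
 *	struct drm_mm_node *free_space, *node = NULL;
 *
 *	if (drm_mm_pre_get(&dev_priv->mm))
 *		return -ENOMEM;
 *
 *	spin_lock(&dev_priv->mm_lock);
 *	free_space = drm_mm_search_free(&dev_priv->mm, size, alignment, 0);
 *	if (free_space)
 *		node = drm_mm_get_block_atomic(free_space, size, alignment);
 *	spin_unlock(&dev_priv->mm_lock);
 */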
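/*
 * A note on the data structure (added explanation, not original text):
 * free space is tracked implicitly. A node whose hole_follows flag is set
 * is followed by a hole stretching from the end of that node to the start
 * of the next node on the address-ordered node_list. The two helpers
 * below compute those boundaries.
 */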
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (alignment)
		tmp = hole_start % alignment;

	if (!tmp) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	} else
		wasted = alignment - tmp;

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free(mm, size, alignment, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node);
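
/*
 * A minimal sketch of the embedded-node pattern (the foo_buffer object
 * and dev_priv are hypothetical, not from the original file):
 *
 *	struct foo_buffer {
 *		struct drm_mm_node node;
 *	};
 *
 *	struct foo_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *	int ret;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = drm_mm_insert_node(&dev_priv->mm, &buf->node, size, alignment);
 *	if (ret)
 *		kfree(buf);
 *
 * The node must be zeroed before insertion (hence kzalloc) and is later
 * released with drm_mm_remove_node(&buf->node).
 */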

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (hole_start < start)
		wasted += start - hole_start;
	if (alignment)
		tmp = (hole_start + wasted) % alignment;

	if (tmp)
		wasted += alignment - tmp;

	if (!wasted) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	}

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);
	BUG_ON(node->start + node->size > end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range-restricted
 * allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
						start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
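
/*
 * A short sketch with hypothetical numbers: restrict the allocation to
 * the first 256 MiB of the managed range, e.g. a CPU-mappable aperture:
 *
 *	ret = drm_mm_insert_node_in_range(&dev_priv->mm, &buf->node,
 *					  size, alignment,
 *					  0, 256 * 1024 * 1024);
 */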

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
				== drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
				!= drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
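
/*
 * Lifecycle sketch for this legacy interface (hypothetical driver state,
 * not from the original file): blocks handed out by the drm_mm_get_block
 * family are returned with drm_mm_put_block(), which recycles or frees
 * the struct.
 *
 *	free_space = drm_mm_search_free(&dev_priv->mm, size, alignment, 0);
 *	if (!free_space)
 *		return -ENOSPC;
 *	node = drm_mm_get_block(free_space, size, alignment);
 *	...
 *	drm_mm_put_block(node);
 */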

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (end >= start + size + wasted)
		return 1;

	return 0;
}

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(drm_mm_hole_node_start(entry),
				     drm_mm_hole_node_end(entry),
				     size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = max(drm_mm_hole_node_start(entry),
					      start);
		unsigned long adj_end = min(drm_mm_hole_node_end(entry), end);

		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
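
/*
 * A short sketch (hypothetical objects, not from the original file): hand
 * an allocated range over to a new embedded node, e.g. when ownership of
 * the range moves to a new object:
 *
 *	drm_mm_replace_node(&old_buf->node, &new_buf->node);
 *
 * Afterwards old_buf->node is no longer allocated, so old_buf can be
 * freed without touching the memory manager.
 */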

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
		      unsigned alignment)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
				 unsigned alignment,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	hole_start = drm_mm_hole_node_start(prev_node);
	hole_end = drm_mm_hole_node_end(prev_node);
	if (mm->scan_check_range) {
		adj_start = max(hole_start, mm->scan_start);
		adj_end = min(hole_end, mm->scan_end);
	} else {
		adj_start = hole_start;
		adj_end = hole_end;
	}

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		/* Store start & size of the entire hole, not just the
		 * desired part, so drm_mm_scan_remove_block() can check
		 * containment. */
		mm->scan_hit_start = hole_start;
		mm->scan_hit_size = hole_end - hole_start;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the hole_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	INIT_LIST_HEAD(&node->node_list);
	list_add(&node->node_list, &prev_node->node_list);

	/* Only need to check for containment because start & size of the
	 * complete resulting free block (not just the desired part) is
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
			<= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
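
/*
 * A condensed sketch of the whole eviction-roster flow (dev_priv->lru,
 * scan_link and foo_evict() are hypothetical driver pieces; error handling
 * omitted). First grow the roster from the LRU until a big enough hole
 * appears:
 *
 *	LIST_HEAD(roster);
 *
 *	drm_mm_init_scan(&dev_priv->mm, size, alignment);
 *	list_for_each_entry(buf, &dev_priv->lru, lru_link) {
 *		list_add(&buf->scan_link, &roster);
 *		if (drm_mm_scan_add_block(&buf->node))
 *			break;
 *	}
 *
 * Blocks must be removed in the exact reverse order of addition; walking
 * the roster head-first does that, since list_add() prepends:
 *
 *	list_for_each_entry_safe(buf, next, &roster, scan_link) {
 *		if (drm_mm_scan_remove_block(&buf->node))
 *			foo_evict(buf);
 *		list_del(&buf->scan_link);
 *	}
 *
 * Evicting exactly the flagged blocks frees the hole, after which the
 * originally requested allocation succeeds (provided the scan reported a
 * hit at all).
 */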

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking:
	 * the head node is a sentinel placed at the end of the range whose
	 * size wraps around, so that the hole following it covers the
	 * entire managed range [start, start + size). */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);
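
/*
 * Lifecycle sketch (hypothetical 16 MiB range, not from the original
 * file):
 *
 *	struct drm_mm mm;
 *
 *	drm_mm_init(&mm, 0, 16 * 1024 * 1024);
 *	... insert and remove nodes ...
 *	drm_mm_takedown(&mm);
 *
 * drm_mm_takedown() below refuses to tear down a manager that still
 * contains allocated nodes, so everything must be removed first.
 */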

void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
					hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif