xref: /openbmc/linux/include/drm/drm_mm.h (revision 05fc0321)
1 /**************************************************************************
2  *
3  * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
4  * Copyright 2016 Intel Corporation
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  *
28  **************************************************************************/
29 /*
30  * Authors:
31  * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
32  */
33 
34 #ifndef _DRM_MM_H_
35 #define _DRM_MM_H_
36 
37 /*
38  * Generic range manager structs
39  */
40 #include <linux/bug.h>
41 #include <linux/rbtree.h>
42 #include <linux/kernel.h>
43 #include <linux/list.h>
44 #include <linux/spinlock.h>
45 #ifdef CONFIG_DRM_DEBUG_MM
46 #include <linux/stackdepot.h>
47 #endif
48 #include <drm/drm_print.h>
49 
50 #ifdef CONFIG_DRM_DEBUG_MM
51 #define DRM_MM_BUG_ON(expr) BUG_ON(expr)
52 #else
53 #define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
54 #endif
55 
56 enum drm_mm_search_flags {
57 	DRM_MM_SEARCH_DEFAULT =		0,
58 	DRM_MM_SEARCH_BEST =		1 << 0,
59 	DRM_MM_SEARCH_BELOW =		1 << 1,
60 };
61 
62 enum drm_mm_allocator_flags {
63 	DRM_MM_CREATE_DEFAULT =		0,
64 	DRM_MM_CREATE_TOP =		1 << 0,
65 };
66 
67 #define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
68 #define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
69 
70 /**
71  * struct drm_mm_node - allocated block in the DRM allocator
72  *
73  * This represents an allocated block in a &drm_mm allocator. Except for
74  * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is
75  * entirely opaque and should only be accessed through the provided functions.
76  * Since allocation of these nodes is entirely handled by the driver, they can be
77  * embedded.
78  */
79 struct drm_mm_node {
80 	/** @color: Opaque driver-private tag. */
81 	unsigned long color;
82 	/** @start: Start address of the allocated block. */
83 	u64 start;
84 	/** @size: Size of the allocated block. */
85 	u64 size;
86 	/* private: */
87 	struct list_head node_list;
88 	struct list_head hole_stack;
89 	struct rb_node rb;
90 	unsigned hole_follows : 1;
91 	unsigned allocated : 1;
92 	bool scanned_block : 1;
93 	u64 __subtree_last;
94 	struct drm_mm *mm;
95 #ifdef CONFIG_DRM_DEBUG_MM
96 	depot_stack_handle_t stack;
97 #endif
98 };
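
/*
 * Example (illustrative sketch only, not taken from any driver): embedding the
 * node in a driver object and pre-reserving a fixed range with
 * drm_mm_reserve_node(). struct my_object and my_reserve_range are hypothetical
 * names; the node must be cleared and have start/size filled in beforehand.
 *
 *	struct my_object {
 *		struct drm_mm_node node;
 *	};
 *
 *	static int my_reserve_range(struct drm_mm *mm, struct my_object *obj,
 *				    u64 start, u64 size)
 *	{
 *		memset(&obj->node, 0, sizeof(obj->node));
 *		obj->node.start = start;
 *		obj->node.size = size;
 *		return drm_mm_reserve_node(mm, &obj->node);
 *	}
 */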
99 
100 /**
101  * struct drm_mm - DRM allocator
102  *
103  * DRM range allocator with a few special functions and features geared towards
104  * managing GPU memory. Except for the @color_adjust callback the structure is
105  * entirely opaque and should only be accessed through the provided functions
106  * and macros. This structure can be embedded into larger driver structures.
107  */
108 struct drm_mm {
109 	/**
110 	 * @color_adjust:
111 	 *
112 	 * Optional driver callback to further apply restrictions on a hole. The
113 	 * node argument points at the node containing the hole from which the
114 	 * block would be allocated (see drm_mm_hole_follows() and friends), color
115 	 * is the opaque tag of the block to be allocated, and start/end delimit
116 	 * the hole; the driver can adjust them as needed to e.g. insert guard pages.
117 	 */
118 	void (*color_adjust)(const struct drm_mm_node *node,
119 			     unsigned long color,
120 			     u64 *start, u64 *end);
121 
122 	/* private: */
123 	/* List of all memory nodes that immediately precede a free hole. */
124 	struct list_head hole_stack;
125 	/* head_node.node_list is the list of all memory nodes, ordered
126 	 * according to the (increasing) start address of the memory node. */
127 	struct drm_mm_node head_node;
128 	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
129 	struct rb_root interval_tree;
130 
131 	unsigned long scan_active;
132 };
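
/*
 * Example (hedged sketch, not mandated by the API): a minimal color_adjust
 * implementation that keeps a one-page guard in front of an allocation whose
 * opaque tag differs from that of the preceding block. my_color_adjust is a
 * hypothetical name and the guard size is arbitrary; after drm_mm_init() a
 * driver would simply set mm->color_adjust = my_color_adjust.
 *
 *	static void my_color_adjust(const struct drm_mm_node *node,
 *				    unsigned long color,
 *				    u64 *start, u64 *end)
 *	{
 *		if (drm_mm_node_allocated(node) && node->color != color)
 *			*start += PAGE_SIZE;
 *	}
 */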
133 
134 /**
135  * struct drm_mm_scan - DRM allocator eviction roster data
136  *
137  * This structure tracks data needed for the eviction roster set up using
138  * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and
139  * drm_mm_scan_remove_block(). The structure is entirely opaque and should only
140  * be accessed through the provided functions and macros. It is meant to be
141  * allocated temporarily by the driver on the stack.
142  */
143 struct drm_mm_scan {
144 	/* private: */
145 	struct drm_mm *mm;
146 
147 	u64 size;
148 	u64 alignment;
149 	u64 remainder_mask;
150 
151 	u64 range_start;
152 	u64 range_end;
153 
154 	u64 hit_start;
155 	u64 hit_end;
156 
157 	unsigned long color;
158 	unsigned int flags;
159 };
160 
161 /**
162  * drm_mm_node_allocated - checks whether a node is allocated
163  * @node: drm_mm_node to check
164  *
165  * Drivers are required to clear a node prior to using it with the
166  * drm_mm range manager.
167  *
168  * Drivers should use this helper for proper encapsulation of drm_mm
169  * internals.
170  *
171  * Returns:
172  * True if the @node is allocated.
173  */
174 static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
175 {
176 	return node->allocated;
177 }
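
/*
 * A common cleanup pattern enabled by this helper (sketch; obj->node is a
 * hypothetical driver-embedded node that was zeroed before first use):
 *
 *	if (drm_mm_node_allocated(&obj->node))
 *		drm_mm_remove_node(&obj->node);
 */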
178 
179 /**
180  * drm_mm_initialized - checks whether an allocator is initialized
181  * @mm: drm_mm to check
182  *
183  * Drivers should clear the struct drm_mm prior to initialisation if they
184  * want to use this function.
185  *
186  * Drivers should use this helper for proper encapsulation of drm_mm
187  * internals.
188  *
189  * Returns:
190  * True if the @mm is initialized.
191  */
192 static inline bool drm_mm_initialized(const struct drm_mm *mm)
193 {
194 	return mm->hole_stack.next;
195 }
196 
197 /**
198  * drm_mm_hole_follows - checks whether a hole follows this node
199  * @node: drm_mm_node to check
200  *
201  * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
202  * If you wish to know whether a hole follows this particular node,
203  * query this function. See also drm_mm_hole_node_start() and
204  * drm_mm_hole_node_end().
205  *
206  * Returns:
207  * True if a hole follows the @node.
208  */
209 static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
210 {
211 	return node->hole_follows;
212 }
213 
214 static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
215 {
216 	return hole_node->start + hole_node->size;
217 }
218 
219 /**
220  * drm_mm_hole_node_start - computes the start of the hole following @node
221  * @hole_node: drm_mm_node which implicitly tracks the following hole
222  *
223  * This is useful for driver-specific debug dumpers. Otherwise drivers should
224  * not inspect holes themselves. Drivers must check first whether a hole indeed
225  * follows by looking at drm_mm_hole_follows().
226  *
227  * Returns:
228  * Start of the subsequent hole.
229  */
230 static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
231 {
232 	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
233 	return __drm_mm_hole_node_start(hole_node);
234 }
235 
236 static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
237 {
238 	return list_next_entry(hole_node, node_list)->start;
239 }
240 
241 /**
242  * drm_mm_hole_node_end - computes the end of the hole following @node
243  * @hole_node: drm_mm_node which implicitly tracks the following hole
244  *
245  * This is useful for driver-specific debug dumpers. Otherwise drivers should
246  * not inspect holes themselves. Drivers must check first whether a hole indeed
247  * follows by looking at drm_mm_hole_follows().
248  *
249  * Returns:
250  * End of the subsequent hole.
251  */
252 static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
253 {
254 	return __drm_mm_hole_node_end(hole_node);
255 }
256 
257 /**
258  * drm_mm_nodes - list of nodes under the drm_mm range manager
259  * @mm: the struct drm_mm range manager
260  *
261  * As the drm_mm range manager hides its node_list deep within its
262  * structure, extracting it looks painful and repetitive. This is
263  * not expected to be used outside of the drm_mm_for_each_node()
264  * macros and similar internal functions.
265  *
266  * Returns:
267  * The node list, may be empty.
268  */
269 #define drm_mm_nodes(mm) (&(mm)->head_node.node_list)
270 
271 /**
272  * drm_mm_for_each_node - iterator to walk over all allocated nodes
273  * @entry: &struct drm_mm_node to assign to in each iteration step
274  * @mm: &drm_mm allocator to walk
275  *
276  * This iterator walks over all nodes in the range allocator. It is implemented
277  * with list_for_each(), so it is not safe against removal of elements.
278  */
279 #define drm_mm_for_each_node(entry, mm) \
280 	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)
281 
282 /**
283  * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
284  * @entry: &struct drm_mm_node to assign to in each iteration step
285  * @next: &struct drm_mm_node to store the next step
286  * @mm: &drm_mm allocator to walk
287  *
288  * This iterator walks over all nodes in the range allocator. It is implemented
289  * with list_for_each_safe(), so it is safe against removal of elements.
290  */
291 #define drm_mm_for_each_node_safe(entry, next, mm) \
292 	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
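
/*
 * Example (sketch with assumed context): forcibly releasing every node still
 * allocated before tearing the allocator down, e.g. in a driver error path.
 * The safe variant is required because drm_mm_remove_node() unlinks @entry
 * from the list being walked. mm is assumed to point at the &drm_mm being
 * shut down.
 *
 *	struct drm_mm_node *entry, *next;
 *
 *	drm_mm_for_each_node_safe(entry, next, mm)
 *		drm_mm_remove_node(entry);
 *	drm_mm_takedown(mm);
 */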
293 
294 #define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
295 	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
296 	     &entry->hole_stack != &(mm)->hole_stack ? \
297 	     hole_start = drm_mm_hole_node_start(entry), \
298 	     hole_end = drm_mm_hole_node_end(entry), \
299 	     1 : 0; \
300 	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
301 
302 /**
303  * drm_mm_for_each_hole - iterator to walk over all holes
304  * @entry: &drm_mm_node used internally to track progress
305  * @mm: &drm_mm allocator to walk
306  * @hole_start: u64 variable to assign the hole start to on each iteration
307  * @hole_end: u64 variable to assign the hole end to on each iteration
308  *
309  * This iterator walks over all holes in the range allocator. It is implemented
310  * with list_for_each(), so it is not safe against removal of elements. @entry is used
311  * internally and will not reflect a real drm_mm_node for the very first hole.
312  * Hence users of this iterator may not access it.
313  *
314  * Implementation Note:
315  * We need to inline list_for_each_entry in order to be able to set hole_start
316  * and hole_end on each iteration while keeping the macro sane.
317  *
318  * The __drm_mm_for_each_hole version is similar, but with added support for
319  * going backwards.
320  */
321 #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
322 	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
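
/*
 * Example (illustrative only): dumping all holes of an allocator from a
 * driver-specific debug helper. mm is assumed to point at the &drm_mm; note
 * that the start/end variables are u64.
 *
 *	struct drm_mm_node *entry;
 *	u64 hole_start, hole_end;
 *
 *	drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
 *		pr_info("hole [%llx, %llx)\n", hole_start, hole_end);
 */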
323 
324 /*
325  * Basic range manager support (drm_mm.c)
326  */
327 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
328 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
329 					struct drm_mm_node *node,
330 					u64 size,
331 					u64 alignment,
332 					unsigned long color,
333 					u64 start,
334 					u64 end,
335 					enum drm_mm_search_flags sflags,
336 					enum drm_mm_allocator_flags aflags);
337 
338 /**
339  * drm_mm_insert_node_in_range - ranged search for space and insert @node
340  * @mm: drm_mm to allocate from
341  * @node: preallocated node to insert
342  * @size: size of the allocation
343  * @alignment: alignment of the allocation
344  * @start: start of the allowed range for this node
345  * @end: end of the allowed range for this node
346  * @flags: flags to fine-tune the allocation
347  *
348  * This is a simplified version of drm_mm_insert_node_in_range_generic() with
349  * @color set to 0.
350  *
351  * The preallocated node must be cleared to 0.
352  *
353  * Returns:
354  * 0 on success, -ENOSPC if there's no suitable hole.
355  */
356 static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
357 					      struct drm_mm_node *node,
358 					      u64 size,
359 					      u64 alignment,
360 					      u64 start,
361 					      u64 end,
362 					      enum drm_mm_search_flags flags)
363 {
364 	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
365 						   0, start, end, flags,
366 						   DRM_MM_CREATE_DEFAULT);
367 }
368 
369 /**
370  * drm_mm_insert_node_generic - search for space and insert @node
371  * @mm: drm_mm to allocate from
372  * @node: preallocated node to insert
373  * @size: size of the allocation
374  * @alignment: alignment of the allocation
375  * @color: opaque tag value to use for this node
376  * @sflags: flags to fine-tune the allocation search
377  * @aflags: flags to fine-tune the allocation behavior
378  *
379  * This is a simplified version of drm_mm_insert_node_in_range_generic() with no
380  * range restrictions applied.
381  *
382  * The preallocated node must be cleared to 0.
383  *
384  * Returns:
385  * 0 on success, -ENOSPC if there's no suitable hole.
386  */
387 static inline int
388 drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
389 			   u64 size, u64 alignment,
390 			   unsigned long color,
391 			   enum drm_mm_search_flags sflags,
392 			   enum drm_mm_allocator_flags aflags)
393 {
394 	return drm_mm_insert_node_in_range_generic(mm, node,
395 						   size, alignment, 0,
396 						   0, U64_MAX,
397 						   sflags, aflags);
398 }
399 
400 /**
401  * drm_mm_insert_node - search for space and insert @node
402  * @mm: drm_mm to allocate from
403  * @node: preallocated node to insert
404  * @size: size of the allocation
405  * @alignment: alignment of the allocation
406  * @flags: flags to fine-tune the allocation
407  *
408  * This is a simplified version of drm_mm_insert_node_generic() with @color set
409  * to 0.
410  *
411  * The preallocated node must be cleared to 0.
412  *
413  * Returns:
414  * 0 on success, -ENOSPC if there's no suitable hole.
415  */
416 static inline int drm_mm_insert_node(struct drm_mm *mm,
417 				     struct drm_mm_node *node,
418 				     u64 size,
419 				     u64 alignment,
420 				     enum drm_mm_search_flags flags)
421 {
422 	return drm_mm_insert_node_generic(mm, node,
423 					  size, alignment, 0,
424 					  flags, DRM_MM_CREATE_DEFAULT);
425 }
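
/*
 * Example (sketch with assumed names): the simplest allocation flow. obj is a
 * hypothetical driver object embedding a drm_mm_node; the node must be zeroed
 * before its first insertion. On success obj->node.start holds the assigned
 * offset, on failure -ENOSPC is returned and the driver may fall back to
 * eviction (see struct drm_mm_scan).
 *
 *	memset(&obj->node, 0, sizeof(obj->node));
 *	ret = drm_mm_insert_node(mm, &obj->node, size, 0,
 *				 DRM_MM_SEARCH_DEFAULT);
 *	if (ret)
 *		return ret;
 */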
426 
427 void drm_mm_remove_node(struct drm_mm_node *node);
428 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
429 void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
430 void drm_mm_takedown(struct drm_mm *mm);
431 
432 /**
433  * drm_mm_clean - checks whether an allocator is clean
434  * @mm: drm_mm allocator to check
435  *
436  * Returns:
437  * True if the allocator is completely free, false if there's still a node
438  * allocated in it.
439  */
440 static inline bool drm_mm_clean(const struct drm_mm *mm)
441 {
442 	return list_empty(drm_mm_nodes(mm));
443 }
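
/*
 * Typical life cycle (illustrative sketch; managed_size is a placeholder):
 * initialize the allocator over the managed range at load time and tear it
 * down at unload, once all nodes have been removed again.
 *
 *	struct drm_mm mm;
 *
 *	drm_mm_init(&mm, 0, managed_size);
 *	...
 *	WARN_ON(!drm_mm_clean(&mm));
 *	drm_mm_takedown(&mm);
 */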
444 
445 struct drm_mm_node *
446 __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);
447 
448 /**
449  * drm_mm_for_each_node_in_range - iterator to walk over a range of
450  * allocated nodes
451  * @node__: drm_mm_node structure to assign to in each iteration step
452  * @mm__: drm_mm allocator to walk
453  * @start__: starting offset, the first node will overlap this
454  * @end__: ending offset, the last node will start before this (but may overlap)
455  *
456  * This iterator walks over all nodes in the range allocator that lie
457  * between @start and @end. It is implemented similarly to list_for_each(),
458  * but using the internal interval tree to accelerate the search for the
459  * starting node, and so not safe against removal of elements. It assumes
460  * that @end is within (or is the upper limit of) the drm_mm allocator.
461  */
462 #define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
463 	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
464 	     node__ && node__->start < (end__);				\
465 	     node__ = list_next_entry(node__, node_list))
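
/*
 * Example (illustrative only): reporting any node that overlaps a candidate
 * range [start, start + size). mm, start and size are assumed to come from
 * the caller.
 *
 *	struct drm_mm_node *node;
 *
 *	drm_mm_for_each_node_in_range(node, mm, start, start + size)
 *		pr_debug("conflict with [%llx, %llx)\n",
 *			 node->start, node->start + node->size);
 */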
466 
467 void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
468 				 struct drm_mm *mm,
469 				 u64 size, u64 alignment, unsigned long color,
470 				 u64 start, u64 end,
471 				 unsigned int flags);
472 
473 /**
474  * drm_mm_scan_init - initialize LRU scanning
475  * @scan: scan state
476  * @mm: drm_mm to scan
477  * @size: size of the allocation
478  * @alignment: alignment of the allocation
479  * @color: opaque tag value to use for the allocation
480  * @flags: flags to specify how the allocation will be performed afterwards
481  *
482  * This is a simplified version of drm_mm_scan_init_with_range() with no range
483  * restrictions applied.
484  *
485  * This simply sets up the scanning routines with the parameters for the desired
486  * hole.
487  *
488  * Warning:
489  * As long as the scan list is non-empty, no other operations than
490  * adding/removing nodes to/from the scan list are allowed.
491  */
492 static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
493 				    struct drm_mm *mm,
494 				    u64 size,
495 				    u64 alignment,
496 				    unsigned long color,
497 				    unsigned int flags)
498 {
499 	drm_mm_scan_init_with_range(scan, mm,
500 				    size, alignment, color,
501 				    0, U64_MAX,
502 				    flags);
503 }
504 
505 bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
506 			   struct drm_mm_node *node);
507 bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
508 			      struct drm_mm_node *node);
509 struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);
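
/*
 * Example (condensed, hedged sketch of the usual eviction flow; my_obj, lru,
 * lru_link, scan_link and my_evict() are hypothetical): scan an LRU list until
 * the roster has found a hole big enough for the new allocation, then evict
 * exactly the blocks the scanner selected. Every block added to the scan must
 * be removed again with drm_mm_scan_remove_block() before any node is freed;
 * using list_add() when adding and then walking that list preserves the
 * required removal order. With a color_adjust callback in use,
 * drm_mm_scan_color_evict() may name one more node that has to go.
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *tmp;
 *	LIST_HEAD(evict_list);
 *	bool found = false;
 *
 *	drm_mm_scan_init(&scan, mm, size, 0, 0, 0);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	list_for_each_entry_safe(obj, tmp, &evict_list, scan_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->scan_link);
 *	}
 *
 *	if (found)
 *		list_for_each_entry(obj, &evict_list, scan_link) {
 *			drm_mm_remove_node(&obj->node);
 *			my_evict(obj);
 *		}
 */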
510 
511 void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);
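
/*
 * Example (sketch, assuming a debugfs seq_file hook and a hypothetical
 * my_dev->mm): dumping the allocator state through the drm_printer
 * abstraction from <drm/drm_print.h>.
 *
 *	static int my_mm_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_printer p = drm_seq_file_printer(m);
 *
 *		drm_mm_print(&my_dev->mm, &p);
 *		return 0;
 *	}
 */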
512 
513 #endif
514