/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif

#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
#else
#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

enum drm_mm_search_flags {
	DRM_MM_SEARCH_DEFAULT =		0,
	DRM_MM_SEARCH_BEST =		1 << 0,
	DRM_MM_SEARCH_BELOW =		1 << 1,
};

enum drm_mm_allocator_flags {
	DRM_MM_CREATE_DEFAULT =		0,
	DRM_MM_CREATE_TOP =		1 << 0,
};

#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP

struct drm_mm_node {
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	unsigned hole_follows : 1;
	unsigned scanned_block : 1;
	unsigned scanned_prev_free : 1;
	unsigned scanned_next_free : 1;
	unsigned scanned_preceeds_hole : 1;
	unsigned allocated : 1;
	unsigned long color;
	u64 start;
	u64 size;
	u64 __subtree_last;
	struct drm_mm *mm;
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};

struct drm_mm {
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root interval_tree;

	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);

	unsigned long scan_active;
};

struct drm_mm_scan {
	struct drm_mm *mm;

	u64 size;
	u64 alignment;

	u64 range_start;
	u64 range_end;

	u64 hit_start;
	u64 hit_end;

	struct drm_mm_node *prev_scanned_node;

	unsigned long color;
	bool check_range : 1;
};

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers are required to clear a node prior to using it with the
 * drm_mm range manager.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return node->allocated;
}
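
/*
 * Example: a minimal sketch of the expected usage, not part of the API.
 * A node embedded in a zeroed (e.g. kzalloc'd) driver object starts out
 * unallocated; drm_mm_node_allocated() then reports whether it currently
 * owns address space. struct my_buffer and my_buffer_bind() are
 * hypothetical, drm_mm_insert_node() is declared further below.
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;
 *	};
 *
 *	static int my_buffer_bind(struct drm_mm *mm, struct my_buffer *buf,
 *				  u64 size)
 *	{
 *		if (drm_mm_node_allocated(&buf->node))
 *			return 0;
 *
 *		return drm_mm_insert_node(mm, &buf->node, size, 0,
 *					  DRM_MM_SEARCH_DEFAULT);
 *	}
 */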

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should clear the struct drm_mm prior to initialisation if they
 * want to use this function.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return mm->hole_stack.next;
}
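
/*
 * Example: a sketch only. Because drm_mm_initialized() merely inspects
 * hole_stack.next, it requires the struct drm_mm to have been zeroed
 * before drm_mm_init() was ever called. A hypothetical teardown path can
 * then be made safe against an allocator that was never brought up:
 *
 *	static void my_vm_fini(struct drm_mm *mm)
 *	{
 *		if (drm_mm_initialized(mm))
 *			drm_mm_takedown(mm);
 *	}
 */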

static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @hole_node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must first check whether a hole indeed
 * follows by looking at @hole_node->hole_follows.
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	DRM_MM_BUG_ON(!hole_node->hole_follows);
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @hole_node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must first check whether a hole indeed
 * follows by looking at @hole_node->hole_follows.
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}
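
/*
 * Example: a debug-only sketch of the intended use of the two helpers
 * above, dumping every hole from a hypothetical driver debug routine
 * (drm_mm_for_each_node() is defined further below):
 *
 *	struct drm_mm_node *node;
 *
 *	drm_mm_for_each_node(node, mm) {
 *		if (node->hole_follows)
 *			pr_debug("hole: %llx-%llx\n",
 *				 drm_mm_hole_node_start(node),
 *				 drm_mm_hole_node_end(node));
 *	}
 */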

/**
 * drm_mm_nodes - list of nodes under the drm_mm range manager
 * @mm: the struct drm_mm range manager
 *
 * As the drm_mm range manager hides its node_list deep within its
 * structure, extracting it looks painful and repetitive. This is
 * not expected to be used outside of the drm_mm_for_each_node()
 * macros and similar internal functions.
 *
 * Returns:
 * The node list, may be empty.
 */
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) \
	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)
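
/*
 * Example: a sketch summing up the space consumed by all allocated
 * nodes; purely illustrative, no such helper is provided here.
 *
 *	struct drm_mm_node *node;
 *	u64 total = 0;
 *
 *	drm_mm_for_each_node(node, mm)
 *		total += node->size;
 */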

/**
 * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @next: drm_mm_node structure to store the next step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_safe(), so it is safe against removal of elements.
 */
#define drm_mm_for_each_node_safe(entry, next, mm) \
	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
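
/*
 * Example: a sketch of the typical use of the safe variant, releasing
 * every remaining node before drm_mm_takedown(). The safe iterator is
 * required here because drm_mm_remove_node() unlinks @entry from the
 * very list being walked.
 *
 *	struct drm_mm_node *node, *next;
 *
 *	drm_mm_for_each_node_safe(node, next, mm)
 *		drm_mm_remove_node(node);
 */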

#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
	     &entry->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(entry), \
	     hole_end = drm_mm_hole_node_end(entry), \
	     1 : 0; \
	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @entry: drm_mm_node used internally to track progress
 * @mm: drm_mm allocator to walk
 * @hole_start: u64 variable to assign the hole start to on each iteration
 * @hole_end: u64 variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements. @entry
 * is used internally and will not reflect a real drm_mm_node for the very
 * first hole; hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 *
 * The __drm_mm_for_each_hole version is similar, but with added support for
 * going backwards.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
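
/*
 * Example: a sketch reporting the largest hole, e.g. as a hint for the
 * biggest allocation that could still succeed (ignoring alignment and
 * color); illustrative only.
 *
 *	struct drm_mm_node *entry;
 *	u64 hole_start, hole_end, largest = 0;
 *
 *	drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
 *		largest = max(largest, hole_end - hole_start);
 */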

/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
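
/*
 * Example: a sketch of reserving a fixed range with drm_mm_reserve_node(),
 * typically to carve out address space that is already in use, such as a
 * firmware-initialised scanout buffer. The node must be cleared and have
 * @start and @size (and, if used, @color) filled in beforehand; the
 * FW_FB_* constants are hypothetical. -ENOSPC is returned if the
 * requested range conflicts with an existing node.
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	int err;
 *
 *	if (!node)
 *		return -ENOMEM;
 *
 *	node->start = FW_FB_BASE;
 *	node->size = FW_FB_SIZE;
 *	err = drm_mm_reserve_node(mm, node);
 *	if (err)
 *		kfree(node);
 */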

int drm_mm_insert_node_generic(struct drm_mm *mm,
			       struct drm_mm_node *node,
			       u64 size,
			       u64 alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size,
				     u64 alignment,
				     enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
					  DRM_MM_CREATE_DEFAULT);
}
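
/*
 * Example: a sketch of a simple bottom-up, first-fit allocation with 4k
 * alignment; the mm pointer, size and error handling are illustrative.
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	int err;
 *
 *	if (!node)
 *		return -ENOMEM;
 *
 *	err = drm_mm_insert_node(mm, node, size, SZ_4K,
 *				 DRM_MM_SEARCH_DEFAULT);
 *	if (err)
 *		kfree(node);
 *
 * -ENOSPC from the insert is the usual cue to start an eviction scan,
 * see drm_mm_scan_init() below.
 */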

int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
					struct drm_mm_node *node,
					u64 size,
					u64 alignment,
					unsigned long color,
					u64 start,
					u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_in_range_generic() with
 * @color set to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					      struct drm_mm_node *node,
					      u64 size,
					      u64 alignment,
					      u64 start,
					      u64 end,
					      enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						   0, start, end, flags,
						   DRM_MM_CREATE_DEFAULT);
}
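
/*
 * Example: a sketch restricting an allocation to the first 4 GiB of the
 * address space, e.g. for objects that must stay reachable by hardware
 * with 32bit addressing; illustrative only, @node prepared as above.
 *
 *	err = drm_mm_insert_node_in_range(mm, node, size, 0,
 *					  0, BIT_ULL(32),
 *					  DRM_MM_SEARCH_DEFAULT);
 */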

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);
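
/*
 * Example: a sketch of the usual lifetime of a range manager inside a
 * hypothetical driver private structure. Every node must be removed
 * again before drm_mm_takedown(), which drm_mm_clean() below can verify.
 *
 *	drm_mm_init(&priv->mm, 0, total_size);
 *
 *	... insert and remove nodes at runtime ...
 *
 *	WARN_ON(!drm_mm_clean(&priv->mm));
 *	drm_mm_takedown(&priv->mm);
 */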

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
static inline bool drm_mm_clean(const struct drm_mm *mm)
{
	return list_empty(drm_mm_nodes(mm));
}

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start__ and @end__. It is implemented similarly to
 * list_for_each(), but using the internal interval tree to accelerate the
 * search for the starting node, and so not safe against removal of
 * elements. It assumes that @end__ is within (or is the upper limit of)
 * the drm_mm allocator.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__ && node__->start < (end__);				\
	     node__ = list_next_entry(node__, node_list))
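
/*
 * Example: a sketch checking whether any allocated node overlaps a
 * candidate range before attempting to reserve it; illustrative only.
 *
 *	struct drm_mm_node *node;
 *	bool busy = false;
 *
 *	drm_mm_for_each_node_in_range(node, mm, start, end) {
 *		busy = true;
 *		break;
 *	}
 */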

void drm_mm_scan_init(struct drm_mm_scan *scan,
		      struct drm_mm *mm,
		      u64 size,
		      u64 alignment,
		      unsigned long color);
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end);
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node);
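
/*
 * Example: a sketch of the eviction-scan protocol, loosely modelled on
 * the i915 eviction code; struct my_obj, the lru list and my_evict()
 * (assumed to unbind the object and drm_mm_remove_node() its node) are
 * hypothetical. Candidate nodes are added to the scan until
 * drm_mm_scan_add_block() reports that a suitable hole can be created.
 * Every candidate must then be removed again in the reverse order of
 * addition (list_add() below prepends, so forward iteration does exactly
 * that), and only those for which drm_mm_scan_remove_block() returns
 * true need to be evicted to produce the hole.
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *next;
 *	bool found = false;
 *	LIST_HEAD(evict_list);
 *
 *	drm_mm_scan_init(&scan, mm, size, alignment, 0);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->evict_link);
 *	}
 *
 *	if (found)
 *		list_for_each_entry_safe(obj, next, &evict_list, evict_link)
 *			my_evict(obj);
 */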

void drm_mm_debug_table(const struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, const struct drm_mm *mm);
#endif

#endif