/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif

#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
#else
#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

enum drm_mm_search_flags {
	DRM_MM_SEARCH_DEFAULT = 0,
	DRM_MM_SEARCH_BEST = 1 << 0,
	DRM_MM_SEARCH_BELOW = 1 << 1,
};

enum drm_mm_allocator_flags {
	DRM_MM_CREATE_DEFAULT = 0,
	DRM_MM_CREATE_TOP = 1 << 0,
};

#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP

struct drm_mm_node {
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	unsigned hole_follows : 1;
	unsigned scanned_block : 1;
	unsigned scanned_prev_free : 1;
	unsigned scanned_next_free : 1;
	unsigned scanned_preceeds_hole : 1;
	unsigned allocated : 1;
	unsigned long color;
	u64 start;
	u64 size;
	u64 __subtree_last;
	struct drm_mm *mm;
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};

struct drm_mm {
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root interval_tree;

	unsigned int scan_check_range : 1;
	unsigned int scanned_blocks;
	unsigned long scan_color;
	u64 scan_alignment;
	u64 scan_size;
	u64 scan_hit_start;
	u64 scan_hit_end;
	u64 scan_start;
	u64 scan_end;
	struct drm_mm_node *prev_scanned_node;

	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);
};
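
/*
 * Usage sketch (illustrative only, not part of the API; locking and error
 * handling are elided, and the sizes are arbitrary). A node must be cleared
 * to 0 before its first insertion, and every node must be removed again
 * before drm_mm_takedown():
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	drm_mm_init(&mm, 0, 1ULL << 24); // manage offsets [0, 16M)
 *	err = drm_mm_insert_node(&mm, &node, 4096, 0, DRM_MM_SEARCH_DEFAULT);
 *	if (err) // -ENOSPC when no hole is large enough
 *		goto out;
 *	// node.start now holds the assigned offset, node.size == 4096
 *	drm_mm_remove_node(&node);
 * out:
 *	drm_mm_takedown(&mm); // all nodes must have been removed by now
 */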

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers are required to clear a node prior to using it with the
 * drm_mm range manager.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return node->allocated;
}

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should clear the struct drm_mm prior to initialisation if they
 * want to use this function.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return mm->hole_stack.next;
}

static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @hole_node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at hole_node->hole_follows.
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	DRM_MM_BUG_ON(!hole_node->hole_follows);
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @hole_node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at hole_node->hole_follows.
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_nodes - list of nodes under the drm_mm range manager
 * @mm: the struct drm_mm range manager
 *
 * As the drm_mm range manager hides its node_list deep within its
 * structure, extracting it looks painful and repetitive. This is
 * not expected to be used outside of the drm_mm_for_each_node()
 * macros and similar internal functions.
 *
 * Returns:
 * The node list, may be empty.
 */
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) \
	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)

/**
 * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @next: drm_mm_node structure to store the next step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_safe(), so it is safe against removal of elements.
 */
#define drm_mm_for_each_node_safe(entry, next, mm) \
	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)

#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
	     &entry->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(entry), \
	     hole_end = drm_mm_hole_node_end(entry), \
	     1 : 0; \
	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @entry: drm_mm_node used internally to track progress
 * @mm: drm_mm allocator to walk
 * @hole_start: u64 variable to assign the hole start to on each iteration
 * @hole_end: u64 variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements. @entry
 * is used internally and will not reflect a real drm_mm_node for the very
 * first hole. Hence users of this iterator may not access @entry.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 *
 * The __drm_mm_for_each_hole version is similar, but with added support for
 * going backwards.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
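
/*
 * Example (illustrative sketch only): dumping all holes from a
 * driver-specific debug helper. The printk format is arbitrary, and @entry
 * is deliberately never dereferenced:
 *
 *	struct drm_mm_node *entry;
 *	u64 hole_start, hole_end;
 *
 *	drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
 *		pr_info("hole [%llx-%llx]: %llu bytes\n",
 *			hole_start, hole_end, hole_end - hole_start);
 */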

/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);

int drm_mm_insert_node_generic(struct drm_mm *mm,
			       struct drm_mm_node *node,
			       u64 size,
			       u64 alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size,
				     u64 alignment,
				     enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
					  DRM_MM_CREATE_DEFAULT);
}

int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
					struct drm_mm_node *node,
					u64 size,
					u64 alignment,
					unsigned long color,
					u64 start,
					u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_in_range_generic() with
 * @color set to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					      struct drm_mm_node *node,
					      u64 size,
					      u64 alignment,
					      u64 start,
					      u64 end,
					      enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						   0, start, end, flags,
						   DRM_MM_CREATE_DEFAULT);
}

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(const struct drm_mm *mm);

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start__ and @end__. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so is not safe against removal of elements. It assumes
 * that @end__ is within (or is the upper limit of) the drm_mm allocator.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__) \
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__ && node__->start < (end__); \
	     node__ = list_next_entry(node__, node_list))

void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      u64 alignment,
		      unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);
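
/*
 * Eviction scan sketch (illustrative only; the "my_*" names are hypothetical
 * driver code, and locking is elided). Blocks are speculatively added from an
 * eviction list until drm_mm_scan_add_block() reports that a suitable hole
 * has been found. All added blocks must then be removed again, in the exact
 * reverse order of addition (prepending to a local list and walking it
 * forwards gives that order), and only those for which
 * drm_mm_scan_remove_block() returns true actually need to be evicted:
 *
 *	struct my_buffer *buf, *tmp;
 *	LIST_HEAD(scanned);
 *	bool found = false;
 *
 *	drm_mm_init_scan(mm, size, alignment, color);
 *	list_for_each_entry(buf, &my_lru, lru_link) {
 *		list_add(&buf->scan_link, &scanned);
 *		if (drm_mm_scan_add_block(&buf->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	list_for_each_entry_safe(buf, tmp, &scanned, scan_link) {
 *		bool evict = drm_mm_scan_remove_block(&buf->node);
 *
 *		list_del(&buf->scan_link);
 *		if (found && evict)
 *			my_evict_buffer(buf); // frees buf->node's range
 *	}
 */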

void drm_mm_debug_table(const struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, const struct drm_mm *mm);
#endif

#endif /* _DRM_MM_H_ */