/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif

enum drm_mm_search_flags {
	DRM_MM_SEARCH_DEFAULT =	0,
	DRM_MM_SEARCH_BEST =	1 << 0,
	DRM_MM_SEARCH_BELOW =	1 << 1,
};

enum drm_mm_allocator_flags {
	DRM_MM_CREATE_DEFAULT =	0,
	DRM_MM_CREATE_TOP =	1 << 0,
};

#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP

struct drm_mm_node {
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	unsigned hole_follows : 1;
	unsigned scanned_block : 1;
	unsigned scanned_prev_free : 1;
	unsigned scanned_next_free : 1;
	unsigned scanned_preceeds_hole : 1;
	unsigned allocated : 1;
	unsigned long color;
	u64 start;
	u64 size;
	u64 __subtree_last;
	struct drm_mm *mm;
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};

struct drm_mm {
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root interval_tree;

	unsigned int scan_check_range : 1;
	unsigned scan_alignment;
	unsigned long scan_color;
	u64 scan_size;
	u64 scan_hit_start;
	u64 scan_hit_end;
	unsigned scanned_blocks;
	u64 scan_start;
	u64 scan_end;
	struct drm_mm_node *prev_scanned_node;

	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);
};
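
/*
 * Usage sketch (illustrative only; "foo_bo" is a hypothetical driver type):
 * drivers normally embed a struct drm_mm_node inside their own buffer-object
 * structure, treat it as opaque, and query its state only through the
 * helpers below.
 *
 *	struct foo_bo {
 *		struct drm_mm_node vram_node;	// offset managed by drm_mm
 *		void *vaddr;
 *	};
 *
 *	static bool foo_bo_is_pinned(const struct foo_bo *bo)
 *	{
 *		return drm_mm_node_allocated(&bo->vram_node);
 *	}
 */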

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return node->allocated;
}

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return mm->hole_stack.next;
}

static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	BUG_ON(!hole_node->hole_follows);
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

#define __drm_mm_nodes(mm) (&(mm)->head_node.node_list)

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) \
	list_for_each_entry(entry, __drm_mm_nodes(mm), node_list)

/**
 * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @next: drm_mm_node structure to store the next step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_safe(), so it is safe against removal of elements.
 */
#define drm_mm_for_each_node_safe(entry, next, mm) \
	list_for_each_entry_safe(entry, next, __drm_mm_nodes(mm), node_list)

#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
	     &entry->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(entry), \
	     hole_end = drm_mm_hole_node_end(entry), \
	     1 : 0; \
	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @entry: drm_mm_node used internally to track progress
 * @mm: drm_mm allocator to walk
 * @hole_start: u64 variable to assign the hole start to on each iteration
 * @hole_end: u64 variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements. @entry
 * is used internally and will not reflect a real drm_mm_node for the very
 * first hole. Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 *
 * The __drm_mm_for_each_hole version is similar, but with added support for
 * going backwards.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
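
/*
 * Example (a minimal debug sketch; the foo_dump_holes name is illustrative):
 * walk every hole and print its extent. Only valid while no nodes are being
 * added or removed.
 *
 *	static void foo_dump_holes(struct drm_mm *mm)
 *	{
 *		struct drm_mm_node *entry;
 *		u64 hole_start, hole_end;
 *
 *		drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
 *			pr_info("hole: 0x%llx-0x%llx (%llu bytes)\n",
 *				hole_start, hole_end,
 *				hole_end - hole_start);
 *	}
 */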

/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);

int drm_mm_insert_node_generic(struct drm_mm *mm,
			       struct drm_mm_node *node,
			       u64 size,
			       unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size,
				     unsigned alignment,
				     enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
					  DRM_MM_CREATE_DEFAULT);
}
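
/*
 * A minimal lifecycle sketch (values are illustrative): set up an allocator
 * over a 256 MiB range, carve out one page-aligned block, then release it and
 * tear the allocator down.
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};	// must be cleared to 0 before insertion
 *	int ret;
 *
 *	drm_mm_init(&mm, 0, 256 * 1024 * 1024);
 *	ret = drm_mm_insert_node(&mm, &node, 4096, 4096,
 *				 DRM_MM_SEARCH_DEFAULT);
 *	if (ret == 0) {
 *		// node.start now holds the allocated offset
 *		drm_mm_remove_node(&node);
 *	}
 *	drm_mm_takedown(&mm);	// all nodes must have been removed first
 */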

int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
					struct drm_mm_node *node,
					u64 size,
					unsigned alignment,
					unsigned long color,
					u64 start,
					u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_in_range_generic() with
 * @color set to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					      struct drm_mm_node *node,
					      u64 size,
					      unsigned alignment,
					      u64 start,
					      u64 end,
					      enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						   0, start, end, flags,
						   DRM_MM_CREATE_DEFAULT);
}

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(const struct drm_mm *mm);

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start and @end. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so is not safe against removal of elements. It assumes
 * that @end is within (or is the upper limit of) the drm_mm allocator.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__ && node__->start < (end__);				\
	     node__ = list_next_entry(node__, node_list))

void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);

void drm_mm_debug_table(const struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, const struct drm_mm *mm);
#endif

#endif