/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif

#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
#else
#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

enum drm_mm_search_flags {
	DRM_MM_SEARCH_DEFAULT =	0,
	DRM_MM_SEARCH_BEST =	1 << 0,
	DRM_MM_SEARCH_BELOW =	1 << 1,
};

enum drm_mm_allocator_flags {
	DRM_MM_CREATE_DEFAULT =	0,
	DRM_MM_CREATE_TOP =	1 << 0,
};

#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP

struct drm_mm_node {
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	unsigned hole_follows : 1;
	unsigned scanned_block : 1;
	unsigned scanned_prev_free : 1;
	unsigned scanned_next_free : 1;
	unsigned scanned_preceeds_hole : 1;
	unsigned allocated : 1;
	unsigned long color;
	u64 start;
	u64 size;
	u64 __subtree_last;
	struct drm_mm *mm;
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};
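/*
 * Usage sketch (illustrative, not part of the original header): drivers
 * normally embed a struct drm_mm_node into their own buffer-object
 * structure rather than allocating nodes separately; the embedded node then
 * describes that object's range inside the managed address space. The
 * structure and field names below are hypothetical:
 *
 *	struct my_driver_bo {
 *		struct drm_mm_node vram_node;	range inside VRAM
 *		void *vaddr;			driver-private mapping
 *	};
 */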
struct drm_mm {
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address.
	 */
	struct rb_root interval_tree;

	unsigned int scan_check_range : 1;
	unsigned int scanned_blocks;
	unsigned long scan_color;
	u64 scan_alignment;
	u64 scan_size;
	u64 scan_hit_start;
	u64 scan_hit_end;
	u64 scan_start;
	u64 scan_end;
	struct drm_mm_node *prev_scanned_node;

	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);
};

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return node->allocated;
}

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return mm->hole_stack.next;
}
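/*
 * Usage sketch (illustrative, not taken from the original header): a common
 * pattern is to guard teardown paths with these helpers so that partially
 * initialized objects can be released unconditionally. The structure and
 * function names are hypothetical; drm_mm_remove_node() is declared further
 * below in this header:
 *
 *	static void my_bo_release(struct my_driver_bo *bo)
 *	{
 *		if (drm_mm_node_allocated(&bo->vram_node))
 *			drm_mm_remove_node(&bo->vram_node);
 *	}
 */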
static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	DRM_MM_BUG_ON(!hole_node->hole_follows);
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_nodes - list of nodes under the drm_mm range manager
 * @mm: the struct drm_mm range manager
 *
 * As the drm_mm range manager hides its node_list deep within its
 * structure, extracting it looks painful and repetitive. This is
 * not expected to be used outside of the drm_mm_for_each_node()
 * macros and similar internal functions.
 *
 * Returns:
 * The node list, may be empty.
 */
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) \
	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)
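/*
 * Usage sketch (illustrative, not taken from the original header): walking
 * all allocated nodes, for example to sum up the space currently handed out
 * by a manager. The function name is hypothetical:
 *
 *	static u64 my_mm_allocated_size(struct drm_mm *mm)
 *	{
 *		struct drm_mm_node *node;
 *		u64 total = 0;
 *
 *		drm_mm_for_each_node(node, mm)
 *			total += node->size;
 *		return total;
 *	}
 */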
/**
 * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @next: drm_mm_node structure to store the next step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_safe(), so it is safe against removal of elements.
 */
#define drm_mm_for_each_node_safe(entry, next, mm) \
	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)

#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
	     &entry->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(entry), \
	     hole_end = drm_mm_hole_node_end(entry), \
	     1 : 0; \
	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @entry: drm_mm_node used internally to track progress
 * @mm: drm_mm allocator to walk
 * @hole_start: u64 variable to assign the hole start to on each iteration
 * @hole_end: u64 variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements. @entry
 * is used internally and will not reflect a real drm_mm_node for the very
 * first hole. Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 *
 * The __drm_mm_for_each_hole version is similar, but with added support for
 * going backwards.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
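/*
 * Usage sketch (illustrative, not taken from the original header): walking
 * all holes, for example to report the largest contiguous free block in a
 * debug dump. The function name is hypothetical; note that @entry is only
 * used as iterator state and is never dereferenced here:
 *
 *	static u64 my_mm_largest_hole(struct drm_mm *mm)
 *	{
 *		struct drm_mm_node *entry;
 *		u64 hole_start, hole_end, largest = 0;
 *
 *		drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
 *			largest = max(largest, hole_end - hole_start);
 *		return largest;
 *	}
 */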
/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);

int drm_mm_insert_node_generic(struct drm_mm *mm,
			       struct drm_mm_node *node,
			       u64 size,
			       u64 alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size,
				     u64 alignment,
				     enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
					  DRM_MM_CREATE_DEFAULT);
}
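/*
 * Usage sketch (illustrative, not taken from the original header): a typical
 * allocation round trip against a manager set up with drm_mm_init()
 * (declared further below). The sizes are hypothetical, and the node must be
 * cleared to 0 before insertion:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};
 *	int ret;
 *
 *	drm_mm_init(&mm, 0, SZ_1G);
 *	ret = drm_mm_insert_node(&mm, &node, SZ_4K, PAGE_SIZE,
 *				 DRM_MM_SEARCH_DEFAULT);
 *	if (!ret) {
 *		use node.start and node.size, then release the range with
 *		drm_mm_remove_node(&node);
 *	}
 *	drm_mm_takedown(&mm);
 */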
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
					struct drm_mm_node *node,
					u64 size,
					u64 alignment,
					unsigned long color,
					u64 start,
					u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_in_range_generic() with
 * @color set to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					      struct drm_mm_node *node,
					      u64 size,
					      u64 alignment,
					      u64 start,
					      u64 end,
					      enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						   0, start, end, flags,
						   DRM_MM_CREATE_DEFAULT);
}

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(const struct drm_mm *mm);

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start__ and @end__. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so is not safe against removal of elements. It assumes
 * that @end__ is within (or is the upper limit of) the drm_mm allocator.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__ && node__->start < (end__);				\
	     node__ = list_next_entry(node__, node_list))

void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      u64 alignment,
		      unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);
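/*
 * Usage sketch (illustrative, not taken from the original header; drm_mm.c
 * carries the authoritative description of the scan protocol): the scan
 * helpers build an eviction roster. Nodes from a driver-private LRU are
 * added until drm_mm_scan_add_block() reports that a suitable hole can be
 * created; every added node must then be removed again, in reverse order of
 * addition and before any other operation on the manager, and the nodes for
 * which drm_mm_scan_remove_block() returns true are the ones to evict. All
 * names other than the drm_mm_* calls are hypothetical:
 *
 *	struct my_driver_bo *bo, *tmp;
 *	LIST_HEAD(scan_list);
 *	bool found = false;
 *
 *	drm_mm_init_scan(&mm, size, alignment, 0);
 *	list_for_each_entry(bo, &lru, lru_link) {
 *		list_add(&bo->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&bo->vram_node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	(scan_list is in reverse order of addition since list_add() prepends)
 *	list_for_each_entry_safe(bo, tmp, &scan_list, scan_link) {
 *		bo->evict = drm_mm_scan_remove_block(&bo->vram_node);
 *		list_del(&bo->scan_link);
 *	}
 *
 *	Only after every block has left the scan may the marked nodes be
 *	evicted (drm_mm_remove_node()) and the original allocation retried.
 */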
void drm_mm_debug_table(const struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, const struct drm_mm *mm);
#endif

#endif