/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there could be substantial
 * performance gains if a smarter free list were implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved with
 * an RB-tree, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
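 *
 * A minimal usage sketch (illustrative only, not part of the original file;
 * the size and variable names below are placeholders and error handling is
 * omitted):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	drm_mm_init(&mm, 0, 64 * 1024 * 1024);
 *	node = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (node)
 *		node = drm_mm_get_block_generic(node, 4096, 0, 0);
 *	...
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);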
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>

#define MM_UNUSED_TARGET 4

/* Size of the free block at the tail of the managed range, 0 if allocated. */
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}

/*
 * Shrink the free tail block by 'size' bytes. Fails if the tail block is not
 * free or not strictly larger than 'size'.
 */
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

/*
 * Allocate a node, falling back to the pool of pre-allocated unused nodes if
 * the (possibly atomic) kmalloc fails.
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/*
 * Refill the unused-node pool up to MM_UNUSED_TARGET entries so that
 * subsequent atomic allocations have spare nodes to fall back on.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ?
				-ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

/* Append a new free node covering [start, start + size) to the managed range. */
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

/*
 * Grow the managed range by 'size' bytes, either by extending a free tail
 * block or by creating a new one.
 */
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}

/* Carve an allocated child node of 'size' bytes off the start of a free node. */
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

/*
 * Allocate 'size' bytes from a free node found by drm_mm_search_free(),
 * splitting off an alignment gap and/or a remainder as needed.
 */
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
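 *
 * For example, if both neighbours are free, the three regions collapse into
 * the previous node; the node structures that become redundant are recycled
 * into the unused-node pool, or kfree()d once the pool already holds
 * MM_UNUSED_TARGET entries.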
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);

/*
 * Scan the free stack for a block that can hold 'size' bytes at the requested
 * alignment. Returns the first fit, or the smallest fit if 'best_match' is set.
 */
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			/* Best match: keep the smallest block that still fits. */
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

/* Returns true when only a single node remains on the memory-order list. */
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
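
/*
 * Illustrative sketch, not part of the original file: the unused-node pool
 * filled by drm_mm_pre_get() is intended to back allocations performed in
 * atomic context under a driver-side lock ("driver_lock" below is a
 * hypothetical name):
 *
 *	if (drm_mm_pre_get(&mm))
 *		return -ENOMEM;
 *	spin_lock(&driver_lock);
 *	node = drm_mm_search_free(&mm, size, alignment, 0);
 *	if (node)
 *		node = drm_mm_get_block_generic(node, size, alignment, 1);
 *	spin_unlock(&driver_lock);
 */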