/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions, which could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations could also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
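/*
 * A minimal usage sketch (error handling omitted; names and sizes below are
 * hypothetical, only the drm_mm_* calls come from this file):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *hole, *block;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);	// manage a 1 MiB range
 *
 *	hole = drm_mm_search_free(&mm, 4096, 4096, 0);
 *	if (hole)
 *		block = drm_mm_get_block_generic(hole, 4096, 4096, 0);
 *
 *	// ... use block->start as the offset of the allocation ...
 *
 *	drm_mm_put_block(block);	// return it to the free stack
 *	drm_mm_takedown(&mm);		// only valid once all blocks are freed
 */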
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}

int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		/* Fall back to the cache of pre-allocated nodes. */
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager we are pre-allocating for
 *
 * Tops up the cache of unused nodes to MM_UNUSED_TARGET entries so that
 * later atomic allocations can fall back on them when kmalloc(GFP_ATOMIC)
 * fails.
 *
 * Returns 0 on success, or -ENOMEM if allocation fails and fewer than two
 * nodes are cached.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
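/*
 * Sketch of the intended pairing with atomic allocations (the lock and the
 * dev_priv pointer are hypothetical; only the drm_mm_* calls come from this
 * file).  drm_mm_pre_get() runs in a context that may sleep, so that the
 * later atomic == 1 path can fall back on the cached nodes when GFP_ATOMIC
 * allocation fails:
 *
 *	drm_mm_pre_get(&dev_priv->mm);		// may sleep, tops up the cache
 *
 *	spin_lock(&dev_priv->lock);		// no sleeping from here on
 *	node = drm_mm_search_free(&dev_priv->mm, size, 0, 0);
 *	if (node)
 *		node = drm_mm_get_block_generic(node, size, 0, 1);
 *	spin_unlock(&dev_priv->lock);
 */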
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}

/*
 * Carve 'size' bytes off the start of 'parent'. The new node is returned
 * marked as allocated; 'parent' keeps the remainder.
 */
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		/* Split off the unaligned head so the returned block starts
		 * on an alignment boundary. */
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
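/*
 * Worked example of the alignment path above (all numbers illustrative):
 * requesting alignment == 0x1000 from a free node that starts at 0x1003
 * gives tmp == 3, so a (0x1000 - 3) == 0xffd byte block is split off the
 * front first, leaving the node to start at 0x2000.  The requested size is
 * then carved from that aligned start, and the 0xffd-byte splinter is handed
 * straight back via drm_mm_put_block() so it can merge with a free
 * neighbour.
 */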
/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				/* Both neighbours are free: fold next_node
				 * into prev_node and recycle it. */
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		/* cur was absorbed by a neighbour: recycle or free the node. */
		list_del(&cur->ml_entry);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
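/*
 * Teardown ordering sketch (dev_priv is hypothetical): every block handed
 * out by drm_mm_get_block_generic() must go back through drm_mm_put_block()
 * first, otherwise the single-free-node check in drm_mm_takedown() below
 * fails and teardown is refused:
 *
 *	if (drm_mm_clean(&dev_priv->mm))
 *		drm_mm_takedown(&dev_priv->mm);
 *	else
 *		DRM_ERROR("drm_mm still has allocations\n");
 */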
void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif
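/*
 * drm_mm_dump_table() is written against the seq_file interface, so a driver
 * can expose it through its own debugfs show callback; the wrapper below is
 * a hypothetical sketch, not part of this file's API:
 *
 *	static int my_mm_show(struct seq_file *m, void *unused)
 *	{
 *		struct drm_mm *mm = m->private;
 *
 *		return drm_mm_dump_table(m, mm);
 *	}
 */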