/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has to
 * be taken.  So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_address.h>
#include <linux/of_device.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}
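/*
 * Illustrative sketch (not part of this file's API): how the two
 * lockless primitives above might be used directly on a caller's own
 * word-sized flag field.  The names below are hypothetical.
 *
 *	static unsigned long claim_word;
 *
 *	static bool try_claim_low_nibble(void)
 *	{
 *		return set_bits_ll(&claim_word, 0xfUL) == 0;
 *	}
 *
 *	static void release_low_nibble(void)
 *	{
 *		BUG_ON(clear_bits_ll(&claim_word, 0xfUL));
 *	}
 *
 * If a concurrent writer already owns any bit in the mask, set_bits_ll()
 * returns -EBUSY rather than spinning forever, so the caller can back
 * off and retry elsewhere.
 */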
/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map lock-lessly.  Several
 * users can set/clear the same bitmap simultaneously without a lock.
 * If two users set the same bit, the loser returns the number of bits
 * still to be set; on full success the return value is 0.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map lock-lessly.  Several
 * users can set/clear the same bitmap simultaneously without a lock.
 * If two users clear the same bit, the loser returns the number of
 * bits still to be cleared; on full success the return value is 0.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
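/*
 * Example (illustrative, hypothetical values): create a pool whose
 * smallest allocation unit is 256 bytes (min_alloc_order of 8), with
 * no NUMA node preference.
 *
 *	struct gen_pool *pool;
 *
 *	pool = gen_pool_create(8, -1);
 *	if (!pool)
 *		return -ENOMEM;
 */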
/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	atomic_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
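/*
 * Example (illustrative sketch; SRAM_PHYS and SRAM_SIZE are
 * hypothetical): hand an ioremapped device-local SRAM window to the
 * pool, recording the physical base so gen_pool_virt_to_phys() can
 * translate addresses later.
 *
 *	void __iomem *base = ioremap(SRAM_PHYS, SRAM_SIZE);
 *
 *	if (!base)
 *		return -ENOMEM;
 *	if (gen_pool_add_virt(pool, (unsigned long)base, SRAM_PHYS,
 *			      SRAM_SIZE, -1))
 *		return -ENOMEM;
 */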
/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
				pool->data);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
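/*
 * Example (illustrative): a round trip through the allocator.  Note
 * that gen_pool_alloc() returns 0 on failure, so 0 must not be a valid
 * address within any chunk of the pool.
 *
 *	unsigned long vaddr;
 *	phys_addr_t paddr;
 *
 *	vaddr = gen_pool_alloc(pool, 512);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	paddr = gen_pool_virt_to_phys(pool, vaddr);
 *	...
 *	gen_pool_free(pool, vaddr, 512);
 */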
/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm of
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, use gen_pool_first_fit as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);
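/*
 * Example (illustrative sketch): a custom allocation callback wired in
 * with gen_pool_set_algo().  This hypothetical variant passes an align
 * mask of 1 so every allocation starts on an even bit, i.e. on a
 * 1 << (min_alloc_order + 1) byte boundary.
 *
 *	static unsigned long gen_pool_first_fit_even(unsigned long *map,
 *			unsigned long size, unsigned long start,
 *			unsigned int nr, void *data)
 *	{
 *		return bitmap_find_next_zero_area(map, size, start, nr, 1);
 *	}
 *
 *	gen_pool_set_algo(pool, gen_pool_first_fit_even, NULL);
 */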
/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.  The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
		int nid)
{
	struct gen_pool **ptr, *pool;

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = gen_pool_create(min_alloc_order, nid);
	if (pool) {
		*ptr = pool;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return pool;
}
EXPORT_SYMBOL(devm_gen_pool_create);

/**
 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *dev_get_gen_pool(struct device *dev)
{
	struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
					  NULL);

	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(dev_get_gen_pool);

#ifdef CONFIG_OF
/**
 * of_get_named_gen_pool - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_get_named_gen_pool(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;
	pdev = of_find_device_by_node(np_pool);
	of_node_put(np_pool);
	if (!pdev)
		return NULL;
	return dev_get_gen_pool(&pdev->dev);
}
EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
#endif /* CONFIG_OF */
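/*
 * Example (illustrative sketch; sram_probe is hypothetical): a platform
 * driver can publish a managed pool from its probe routine.  The pool
 * is destroyed automatically on driver detach, and other code can look
 * it up with dev_get_gen_pool() or, given a phandle property in the
 * device tree, with of_get_named_gen_pool().
 *
 *	static int sram_probe(struct platform_device *pdev)
 *	{
 *		struct gen_pool *pool;
 *
 *		pool = devm_gen_pool_create(&pdev->dev, ilog2(32), -1);
 *		if (!pool)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */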