// SPDX-License-Identifier: GPL-2.0-only
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory, etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg
 * implementation, the allocator can NOT be used in NMI handlers. So
 * code that uses the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
        return chunk->end_addr - chunk->start_addr + 1;
}

static inline int
set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
        unsigned long val = READ_ONCE(*addr);

        do {
                if (val & mask_to_set)
                        return -EBUSY;
                cpu_relax();
        } while (!try_cmpxchg(addr, &val, val | mask_to_set));

        return 0;
}

static inline int
clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
        unsigned long val = READ_ONCE(*addr);

        do {
                if ((val & mask_to_clear) != mask_to_clear)
                        return -EBUSY;
                cpu_relax();
        } while (!try_cmpxchg(addr, &val, val & ~mask_to_clear));

        return 0;
}
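/*
 * Illustrative sketch (not part of the kernel API): how the lockless
 * helpers behave under contention. try_cmpxchg() rereads *addr on
 * failure, so when two users race on overlapping masks exactly one of
 * them observes the conflicting bit and bails out with -EBUSY:
 *
 *      unsigned long word = 0;
 *
 *      set_bits_ll(&word, 0x0f);       // returns 0,      word == 0x0f
 *      set_bits_ll(&word, 0x18);       // returns -EBUSY, word unchanged
 *      clear_bits_ll(&word, 0x0f);     // returns 0,      word == 0
 */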
/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one user will return the number of remaining
 * bits, otherwise zero is returned.
 */
static unsigned long
bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const unsigned long size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        while (nr >= bits_to_set) {
                if (set_bits_ll(p, mask_to_set))
                        return nr;
                nr -= bits_to_set;
                bits_to_set = BITS_PER_LONG;
                mask_to_set = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                if (set_bits_ll(p, mask_to_set))
                        return nr;
        }

        return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one user will return the number of
 * remaining bits, otherwise zero is returned.
 */
static unsigned long
bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const unsigned long size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (nr >= bits_to_clear) {
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
                nr -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
        }

        return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
                pool->algo = gen_pool_first_fit;
                pool->data = NULL;
                pool->name = NULL;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);
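/*
 * Illustrative sketch (hypothetical driver code): the typical life
 * cycle is to create the pool once, seed it with memory, and only then
 * allocate from it. "vaddr" is an assumed, already-mapped region;
 * gen_pool_add() is the phys-less wrapper from <linux/genalloc.h>:
 *
 *      struct gen_pool *pool = gen_pool_create(ilog2(256), -1);
 *
 *      if (!pool)
 *              return -ENOMEM;
 *      if (gen_pool_add(pool, (unsigned long)vaddr, SZ_64K, -1)) {
 *              gen_pool_destroy(pool);
 *              return -ENOMEM;
 *      }
 *      addr = gen_pool_alloc(pool, 256);
 */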
/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
                size_t size, int nid, void *owner)
{
        struct gen_pool_chunk *chunk;
        unsigned long nbits = size >> pool->min_alloc_order;
        unsigned long nbytes = sizeof(struct gen_pool_chunk) +
                                BITS_TO_LONGS(nbits) * sizeof(long);

        chunk = vzalloc_node(nbytes, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size - 1;
        chunk->owner = owner;
        atomic_long_set(&chunk->avail, size);

        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
        spin_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool holding the memory
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        paddr = chunk->phys_addr + (addr - chunk->start_addr);
                        break;
                }
        }
        rcu_read_unlock();

        return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        unsigned long bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = chunk_size(chunk) >> order;
                bit = find_first_bit(chunk->bits, end_bit);
                BUG_ON(bit < end_bit);

                vfree(chunk);
        }
        kfree_const(pool->name);
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
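/*
 * Illustrative sketch: a driver exposing on-chip SRAM would register
 * the chunk with both address views so translations work later. The
 * names "sram_virt" and "SRAM_PHYS_BASE" are hypothetical:
 *
 *      gen_pool_add_virt(pool, (unsigned long)sram_virt, SRAM_PHYS_BASE,
 *                        SZ_64K, -1);
 *      ...
 *      phys_addr_t phys = gen_pool_virt_to_phys(pool, addr);
 */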
/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
                genpool_algo_t algo, void *data, void **owner)
{
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
        unsigned long nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (owner)
                *owner = NULL;

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (size > atomic_long_read(&chunk->avail))
                        continue;

                start_bit = 0;
                end_bit = chunk_size(chunk) >> order;
retry:
                start_bit = algo(chunk->bits, end_bit, start_bit,
                                 nbits, data, pool, chunk->start_addr);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
                if (remain) {
                        remain = bitmap_clear_ll(chunk->bits, start_bit,
                                                 nbits - remain);
                        BUG_ON(remain);
                        goto retry;
                }

                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_long_sub(size, &chunk->avail);
                if (owner)
                        *owner = chunk->owner;
                break;
        }
        rcu_read_unlock();
        return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use %NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
        return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc);

/**
 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
 * usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool. Uses the
 * given pool allocation function. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, genpool_algo_t algo, void *data)
{
        unsigned long vaddr;

        if (!pool)
                return NULL;

        vaddr = gen_pool_alloc_algo(pool, size, algo, data);
        if (!vaddr)
                return NULL;

        if (dma)
                *dma = gen_pool_virt_to_phys(pool, vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc_algo);
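/*
 * Illustrative sketch: fetching a buffer and its bus address in one
 * call. "desc_ring", "base" and "RING_ADDR" are hypothetical names for
 * some device's descriptor ring and register:
 *
 *      dma_addr_t dma;
 *      void *desc_ring = gen_pool_dma_alloc(pool, SZ_4K, &dma);
 *
 *      if (!desc_ring)
 *              return -ENOMEM;
 *      writel(lower_32_bits(dma), base + RING_ADDR);
 */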
/**
 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
 * usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of bytes from the specified pool, with the
 * given alignment restriction. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, int align)
{
        struct genpool_data_align data = { .align = align };

        return gen_pool_dma_alloc_algo(pool, size, dma,
                        gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc_align);

/**
 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
 * DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use %NULL if unneeded.
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
        return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc);

/**
 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
 * DMA usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of zeroed bytes from the specified pool. Uses
 * the given pool allocation function. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, genpool_algo_t algo, void *data)
{
        void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);

        if (vaddr)
                memset(vaddr, 0, size);

        return vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);
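/*
 * Illustrative sketch: a 4KiB-aligned, zeroed 8KiB buffer, requested in
 * two equivalent ways (each call is a separate allocation; the second
 * form spells out the algorithm data the first builds internally):
 *
 *      struct genpool_data_align data = { .align = SZ_4K };
 *
 *      void *a = gen_pool_dma_zalloc_align(pool, SZ_8K, &dma, SZ_4K);
 *      void *b = gen_pool_dma_zalloc_algo(pool, SZ_8K, &dma,
 *                                         gen_pool_first_fit_align, &data);
 */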
/**
 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
 * DMA usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of zeroed bytes from the specified pool,
 * with the given alignment restriction. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, int align)
{
        struct genpool_data_align data = { .align = align };

        return gen_pool_dma_zalloc_algo(pool, size, dma,
                        gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);

/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: private data stashed at gen_pool_add() time
 *
 * Free previously allocated special memory back to the specified
 * pool. Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
                void **owner)
{
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        unsigned long start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (owner)
                *owner = NULL;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        BUG_ON(addr + size - 1 > chunk->end_addr);
                        start_bit = (addr - chunk->start_addr) >> order;
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_long_add(size, &chunk->avail);
                        if (owner)
                                *owner = chunk->owner;
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
        void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
        void *data)
{
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
                func(pool, chunk, data);
        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
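/*
 * Illustrative sketch: the pool does not record per-allocation sizes,
 * so a free must pass the same size that was requested at allocation
 * time:
 *
 *      unsigned long addr = gen_pool_alloc(pool, 256);
 *
 *      if (addr)
 *              gen_pool_free(pool, addr, 256);
 */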
/**
 * gen_pool_has_addr - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
                        size_t size)
{
        bool found = false;
        unsigned long end = start + size - 1;
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
                if (start >= chunk->start_addr && start <= chunk->end_addr) {
                        if (end <= chunk->end_addr) {
                                found = true;
                                break;
                        }
                }
        }
        rcu_read_unlock();
        return found;
}
EXPORT_SYMBOL(gen_pool_has_addr);

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t avail = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                avail += atomic_long_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t size = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk_size(chunk);
        rcu_read_unlock();
        return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
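/*
 * Illustrative sketch: gen_pool_size() - gen_pool_avail() yields the
 * number of bytes currently handed out, which is handy for debugfs or
 * tracing:
 *
 *      pr_debug("pool: %zu of %zu bytes in use\n",
 *               gen_pool_size(pool) - gen_pool_avail(pool),
 *               gen_pool_size(pool));
 */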
/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
        rcu_read_lock();

        pool->algo = algo;
        if (!pool->algo)
                pool->algo = gen_pool_first_fit;

        pool->data = data;

        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 * @start_addr: start addr of allocation chunk
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        struct genpool_data_align *alignment;
        unsigned long align_mask, align_off;
        int order;

        alignment = data;
        order = pool->min_alloc_order;
        align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
        align_off = (start_addr & (alignment->align - 1)) >> order;

        return bitmap_find_next_zero_area_off(map, size, start, nr,
                                              align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for the fixed allocation offset
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        struct genpool_data_fixed *fixed_data;
        int order;
        unsigned long offset_bit;
        unsigned long start_bit;

        fixed_data = data;
        order = pool->min_alloc_order;
        offset_bit = fixed_data->offset >> order;
        if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
                return size;

        start_bit = bitmap_find_next_zero_area(map, size,
                        start + offset_bit, nr, 0);
        if (start_bit != offset_bit)
                start_bit = size;
        return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
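/*
 * Illustrative sketch: either make aligned first-fit the pool default,
 * or use it for a single allocation. The alignment value is made up:
 *
 *      struct genpool_data_align align_data = { .align = 32 };
 *
 *      gen_pool_set_algo(pool, gen_pool_first_fit_align, &align_data);
 *      addr = gen_pool_alloc(pool, 256);
 *
 *      // one-off, leaving the pool default untouched:
 *      addr = gen_pool_alloc_algo(pool, 256, gen_pool_first_fit_align,
 *                                 &align_data);
 */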
/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                unsigned long size, unsigned long start,
                unsigned int nr, void *data, struct gen_pool *pool,
                unsigned long start_addr)
{
        unsigned long align_mask = roundup_pow_of_two(nr) - 1;

        return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        unsigned long start_bit = size;
        unsigned long len = size + 1;
        unsigned long index;

        index = bitmap_find_next_zero_area(map, size, start, nr, 0);

        while (index < size) {
                unsigned long next_bit = find_next_bit(map, size, index + nr);
                if ((next_bit - index) < len) {
                        len = next_bit - index;
                        start_bit = index;
                        if (len == nr)
                                return start_bit;
                }
                index = bitmap_find_next_zero_area(map, size,
                                                   next_bit + 1, nr, 0);
        }

        return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
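/*
 * Illustrative sketch: with free runs of 8 and 3 bits in the bitmap, a
 * 3-bit request served by gen_pool_best_fit takes the 3-bit run (an
 * exact fit returns early), whereas gen_pool_first_fit would carve it
 * out of the 8-bit run if that one comes first, splitting it:
 *
 *      gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */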
static void devm_gen_pool_release(struct device *dev, void *res)
{
        gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
        struct gen_pool **p = res;

        /* NULL data matches only a pool without an assigned name */
        if (!data && !(*p)->name)
                return 1;

        if (!data || !(*p)->name)
                return 0;

        return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
        struct gen_pool **p;

        p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
                        (void *)name);
        if (!p)
                return NULL;
        return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
                                      int nid, const char *name)
{
        struct gen_pool **ptr, *pool;
        const char *pool_name = NULL;

        /* Check that genpool to be created is uniquely addressed on device */
        if (gen_pool_get(dev, name))
                return ERR_PTR(-EINVAL);

        if (name) {
                pool_name = kstrdup_const(name, GFP_KERNEL);
                if (!pool_name)
                        return ERR_PTR(-ENOMEM);
        }

        ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                goto free_pool_name;

        pool = gen_pool_create(min_alloc_order, nid);
        if (!pool)
                goto free_devres;

        *ptr = pool;
        pool->name = pool_name;
        devres_add(dev, ptr);

        return pool;

free_devres:
        devres_free(ptr);
free_pool_name:
        kfree_const(pool_name);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
                                 const char *propname, int index)
{
        struct platform_device *pdev;
        struct device_node *np_pool, *parent;
        const char *name = NULL;
        struct gen_pool *pool = NULL;

        np_pool = of_parse_phandle(np, propname, index);
        if (!np_pool)
                return NULL;

        pdev = of_find_device_by_node(np_pool);
        if (!pdev) {
                /* Check if named gen_pool is created by parent node device */
                parent = of_get_parent(np_pool);
                pdev = of_find_device_by_node(parent);
                of_node_put(parent);

                of_property_read_string(np_pool, "label", &name);
                if (!name)
                        name = np_pool->name;
        }
        if (pdev)
                pool = gen_pool_get(&pdev->dev, name);
        of_node_put(np_pool);

        return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
#endif /* CONFIG_OF */
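/*
 * Illustrative sketch (hypothetical device tree consumer): with a
 * property such as "sram = <&mmio_sram>;" on the consumer node, a
 * driver resolves its pool and allocates from it; a provider created
 * via devm_gen_pool_create() is destroyed automatically with its
 * device:
 *
 *      struct gen_pool *pool = of_gen_pool_get(dev->of_node, "sram", 0);
 *
 *      if (!pool)
 *              return -EPROBE_DEFER;
 *      addr = gen_pool_alloc(pool, SZ_1K);
 */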