// SPDX-License-Identifier: GPL-2.0-only
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory and uncached memory.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has
 * to be taken. So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map lock-lessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users race to set the same bit, one of them will return the number
 * of bits still to be set; on success 0 is returned.
 */
static unsigned long
bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_set) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map lock-lessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users race to clear the same bit, one of them will return the number
 * of bits still to be cleared; on success 0 is returned.
 */
static unsigned long
bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_clear) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
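/*
 * A minimal usage sketch (not compiled, not part of the allocator): create
 * a pool, publish a chunk of special memory via the gen_pool_add() wrapper
 * from <linux/genalloc.h>, then allocate and free from it. The virtual
 * base, chunk size, and allocation size are hypothetical placeholders.
 */
#if 0	/* example only */
static int genalloc_example(unsigned long virt_base)
{
	struct gen_pool *pool;
	unsigned long addr;

	/* one bitmap bit per 256 bytes (order 8), any NUMA node */
	pool = gen_pool_create(8, -1);
	if (!pool)
		return -ENOMEM;

	/* hand the pool a 64 KiB chunk starting at virt_base */
	if (gen_pool_add(pool, virt_base, 64 * 1024, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	addr = gen_pool_alloc(pool, 512);	/* rounded up to bit granularity */
	if (addr)
		gen_pool_free(pool, addr, 512);

	gen_pool_destroy(pool);	/* all allocations must be freed first */
	return 0;
}
#endif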
/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		       size_t size, int nid, void *owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long nbits = size >> pool->min_alloc_order;
	unsigned long nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	chunk->owner = owner;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
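/*
 * Illustrative sketch (not compiled): when a chunk is published with its
 * physical base via the gen_pool_add_virt() wrapper, gen_pool_virt_to_phys()
 * translates any address in that chunk by offsetting from the chunk base.
 * The addresses and sizes are hypothetical, assuming the pool's minimum
 * allocation order divides them.
 */
#if 0	/* example only */
static void genalloc_phys_example(struct gen_pool *pool,
				  unsigned long virt_base, phys_addr_t phys_base)
{
	unsigned long vaddr;
	phys_addr_t paddr;

	/* the chunk carries both views of the same 4 KiB region */
	gen_pool_add_virt(pool, virt_base, phys_base, 4096, -1);

	vaddr = gen_pool_alloc(pool, 128);
	paddr = gen_pool_virt_to_phys(pool, vaddr);
	/* paddr == phys_base + (vaddr - virt_base) when the lookup succeeds */
}
#endif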
/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	unsigned long nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool, chunk->start_addr);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		if (owner)
			*owner = chunk->owner;
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use %NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc);

/**
 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
 * usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool. Uses the
 * given pool allocation function. Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc_algo(pool, size, algo, data);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc_algo);
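/*
 * Illustrative sketch (not compiled): gen_pool_dma_alloc() combines the
 * allocation with a gen_pool_virt_to_phys() lookup, so the caller gets
 * both the CPU and DMA views in one call. The buffer size is a
 * hypothetical placeholder.
 */
#if 0	/* example only */
static void *genalloc_dma_example(struct gen_pool *pool, dma_addr_t *dma)
{
	/* returns the CPU address; *dma receives the DMA-view address */
	return gen_pool_dma_alloc(pool, 256, dma);
}
#endif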
/**
 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
 * usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of bytes from the specified pool, with the
 * given alignment restriction. Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_alloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc_align);

/**
 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
 * DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use %NULL if unneeded.
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc);

/**
 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
 * DMA usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of zeroed bytes from the specified pool. Uses
 * the given pool allocation function. Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);

	if (vaddr)
		memset(vaddr, 0, size);

	return vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);

/**
 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
 * DMA usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of zeroed bytes from the specified pool,
 * with the given alignment restriction. Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_zalloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);

/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: private data stashed at gen_pool_add() time
 *
 * Free previously allocated special memory back to the specified
 * pool. Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
		void **owner)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			if (owner)
				*owner = chunk->owner;
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of the generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
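/*
 * Illustrative sketch (not compiled): a callback suitable for
 * gen_pool_for_each_chunk(). It runs under rcu_read_lock, so it must
 * not block. The chunk counter is a hypothetical example payload.
 */
#if 0	/* example only */
static void count_chunk(struct gen_pool *pool, struct gen_pool_chunk *chunk,
			void *data)
{
	unsigned int *nr_chunks = data;

	(*nr_chunks)++;	/* e.g. tally chunks, or inspect chunk->phys_addr */
}

static unsigned int genalloc_count_chunks(struct gen_pool *pool)
{
	unsigned int nr_chunks = 0;

	gen_pool_for_each_chunk(pool, count_chunk, &nr_chunks);
	return nr_chunks;
}
#endif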
/**
 * gen_pool_has_addr - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
		size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}
EXPORT_SYMBOL(gen_pool_has_addr);

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
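/*
 * Illustrative sketch (not compiled): gen_pool_size() and gen_pool_avail()
 * together give a consumption figure; the subtraction below is the obvious
 * derived quantity, not a dedicated API.
 */
#if 0	/* example only */
static size_t genalloc_used(struct gen_pool *pool)
{
	/* bytes currently handed out across all chunks */
	return gen_pool_size(pool) - gen_pool_avail(pool);
}
#endif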
/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm of
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 * @start_addr: start address of the allocation chunk
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask, align_off;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	align_off = (start_addr & (alignment->align - 1)) >> order;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for the fixed offset (struct genpool_data_fixed)
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
					       start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
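/*
 * Illustrative sketch (not compiled): switching a pool to the aligned
 * first-fit algorithm via gen_pool_set_algo(). The genpool_data_* cookie
 * must stay alive for as long as the pool uses it; the static lifetime
 * and 32-byte alignment here are hypothetical choices for the example.
 */
#if 0	/* example only */
static struct genpool_data_align example_align = { .align = 32 };

static void genalloc_algo_example(struct gen_pool *pool)
{
	/* subsequent gen_pool_alloc() calls return 32-byte-aligned addresses */
	gen_pool_set_algo(pool, gen_pool_first_fit_align, &example_align);
}
#endif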
/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool,
		unsigned long start_addr)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * from which the requested memory can be allocated.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		unsigned long next_bit = find_next_bit(map, size, index + nr);

		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);
/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
		int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
		const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
#endif /* CONFIG_OF */
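/*
 * Illustrative sketch (not compiled): a consumer driver looking up a pool
 * published by another device-tree node. The "sram" property name and the
 * buffer size are hypothetical; any phandle-valued property works.
 */
#if 0	/* example only */
static int example_probe(struct platform_device *pdev)
{
	struct gen_pool *pool;
	unsigned long addr;

	pool = of_gen_pool_get(pdev->dev.of_node, "sram", 0);
	if (!pool)
		return -EPROBE_DEFER;	/* provider may not be bound yet */

	/* allocate from the shared on-chip memory */
	addr = gen_pool_alloc(pool, 1024);
	return addr ? 0 : -ENOMEM;
}
#endif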