/*
 * A Remote Heap.  Remote means that we don't touch the memory that the
 * heap points to.  Normal heap implementations use the memory they manage
 * to place their list.  We cannot do that because the memory we manage may
 * have special properties, for example it is uncachable or of different
 * endianness.
 *
 * Author: Pantelis Antoniou <panto@intracom.gr>
 *
 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/rheap.h>

/*
 * Fixup a list_head, needed when copying lists.  If the pointers fall
 * between s and e, apply the delta.  This assumes that
 * sizeof(struct list_head *) == sizeof(unsigned long *).
 */
static inline void fixup(unsigned long s, unsigned long e, int d,
			 struct list_head *l)
{
	unsigned long *pp;

	pp = (unsigned long *)&l->next;
	if (*pp >= s && *pp < e)
		*pp += d;

	pp = (unsigned long *)&l->prev;
	if (*pp >= s && *pp < e)
		*pp += d;
}

/* Grow the allocated blocks */
static int grow(rh_info_t * info, int max_blocks)
{
	rh_block_t *block, *blk;
	int i, new_blocks;
	int delta;
	unsigned long blks, blke;

	if (max_blocks <= info->max_blocks)
		return -EINVAL;

	new_blocks = max_blocks - info->max_blocks;

	block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL);
	if (block == NULL)
		return -ENOMEM;

	if (info->max_blocks > 0) {

		/* copy old block area */
		memcpy(block, info->block,
		       sizeof(rh_block_t) * info->max_blocks);

		delta = (char *)block - (char *)info->block;

		/* and fixup list pointers */
		blks = (unsigned long)info->block;
		blke = (unsigned long)(info->block + info->max_blocks);

		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
			fixup(blks, blke, delta, &blk->list);

		fixup(blks, blke, delta, &info->empty_list);
		fixup(blks, blke, delta, &info->free_list);
		fixup(blks, blke, delta, &info->taken_list);

		/* free the old allocated memory */
		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
			kfree(info->block);
	}

	info->block = block;
	info->empty_slots += new_blocks;
	info->max_blocks = max_blocks;
	info->flags &= ~RHIF_STATIC_BLOCK;

	/* add all new blocks to the free list */
	blk = block + info->max_blocks - new_blocks;
	for (i = 0; i < new_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);

	return 0;
}

/*
 * Assure at least the required amount of empty slots.  If this function
 * causes a grow in the block area then all pointers kept to the block
 * area are invalid!
 */
static int assure_empty(rh_info_t * info, int slots)
{
	int max_blocks;

	/* This function is not meant to be used to grow uncontrollably */
	if (slots >= 4)
		return -EINVAL;

	/* Enough space */
	if (info->empty_slots >= slots)
		return 0;

	/* Next 16 sized block */
	max_blocks = ((info->max_blocks + slots) + 15) & ~15;

	return grow(info, max_blocks);
}
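/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build with #if 0): why grow() must call fixup() after the memcpy().
 * The rh_block_t array embeds the list nodes, so copying the array to a
 * newly kmalloc'ed area leaves every next/prev pointer that pointed into
 * the *old* array dangling.  Shifting each such pointer by the same delta
 * the array moved by restores a consistent list.  The helper name
 * demo_relocate() is hypothetical.
 */
#if 0
static void demo_relocate(rh_block_t *old, rh_block_t *dst, int n)
{
	unsigned long s = (unsigned long)old;
	unsigned long e = (unsigned long)(old + n);
	int d = (char *)dst - (char *)old;
	int i;

	/* Same pattern as grow(): patch only pointers into [s, e) */
	for (i = 0; i < n; i++)
		fixup(s, e, d, &dst[i].list);
}
#endif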
static rh_block_t *get_slot(rh_info_t * info)
{
	rh_block_t *blk;

	/*
	 * If there are no more free slots we cannot recover here; the
	 * caller should have called assure_empty() beforehand.
	 */
	if (info->empty_slots == 0) {
		printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
		return NULL;
	}

	/* Get empty slot to use */
	blk = list_entry(info->empty_list.next, rh_block_t, list);
	list_del_init(&blk->list);
	info->empty_slots--;

	/* Initialize */
	blk->start = 0;
	blk->size = 0;
	blk->owner = NULL;

	return blk;
}

static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
	list_add(&blk->list, &info->empty_list);
	info->empty_slots++;
}

static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	rh_block_t *before;
	rh_block_t *after;
	rh_block_t *next;
	int size;
	unsigned long s, e, bs, be;
	struct list_head *l;

	/* We assume that they are aligned properly */
	size = blkn->size;
	s = blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
	 * (if any) */
	before = NULL;
	after = NULL;
	next = NULL;

	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

		bs = blk->start;
		be = bs + blk->size;

		if (next == NULL && s >= bs)
			next = blk;

		if (be == s)
			before = blk;

		if (e == bs)
			after = blk;

		/* If both are not null, break now */
		if (before != NULL && after != NULL)
			break;
	}

	/* Now check if they are really adjacent */
	if (before && s != (before->start + before->size))
		before = NULL;

	if (after && e != after->start)
		after = NULL;

	/* No coalescing; list insert and return */
	if (before == NULL && after == NULL) {

		if (next != NULL)
			list_add(&blkn->list, &next->list);
		else
			list_add(&blkn->list, &info->free_list);

		return;
	}

	/* We don't need it anymore */
	release_slot(info, blkn);

	/* Grow the before block */
	if (before != NULL && after == NULL) {
		before->size += size;
		return;
	}

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
		after->start -= size;
		after->size += size;
		return;
	}

	/* Grow the before block, and release the after block */
	before->size += size + after->size;
	list_del(&after->list);
	release_slot(info, after);
}

static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	struct list_head *l;

	/* Find the block immediately before the given one (if any) */
	list_for_each(l, &info->taken_list) {
		blk = list_entry(l, rh_block_t, list);
		if (blk->start > blkn->start) {
			list_add_tail(&blkn->list, &blk->list);
			return;
		}
	}

	list_add_tail(&blkn->list, &info->taken_list);
}
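/*
 * Worked example (illustrative only, compiled out): how attach_free_block()
 * coalesces.  With free blocks [0x100-0x200) and [0x300-0x400), freeing
 * [0x200-0x300) matches both the "before" and the "after" neighbour, so the
 * result is a single free block [0x100-0x400) and two block slots go back
 * to the empty list.  demo_coalesce() and its offsets are made up.
 */
#if 0
static void demo_coalesce(rh_info_t *info)
{
	rh_block_t *blkn = get_slot(info);

	if (blkn == NULL)
		return;

	blkn->start = 0x200;
	blkn->size = 0x100;
	attach_free_block(info, blkn);	/* merges with both neighbours */
}
#endif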
/*
 * Create a remote heap dynamically.  Note that no memory for the blocks
 * is allocated here; it will be allocated upon the first attach or
 * allocation request.
 */
rh_info_t *rh_create(unsigned int alignment)
{
	rh_info_t *info;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return ERR_PTR(-EINVAL);

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		return ERR_PTR(-ENOMEM);

	info->alignment = alignment;

	/* Initially everything as empty */
	info->block = NULL;
	info->max_blocks = 0;
	info->empty_slots = 0;
	info->flags = 0;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	return info;
}

/*
 * Destroy a dynamically created remote heap.  Deallocate only if the areas
 * are not static.
 */
void rh_destroy(rh_info_t * info)
{
	if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
		kfree(info->block);

	if ((info->flags & RHIF_STATIC_INFO) == 0)
		kfree(info);
}

/*
 * Initialize in place a remote heap info block.  This is needed to support
 * operation very early in the startup of the kernel, when it is not yet safe
 * to call kmalloc.
 */
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
	     rh_block_t * block)
{
	int i;
	rh_block_t *blk;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return;

	info->alignment = alignment;

	/* Initially everything as empty */
	info->block = block;
	info->max_blocks = max_blocks;
	info->empty_slots = max_blocks;
	info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	/* Add all new blocks to the free list */
	for (i = 0, blk = block; i < max_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);
}

/* Attach a free memory region, coalescing with adjacent free regions */
int rh_attach_region(rh_info_t * info, unsigned long start, int size)
{
	rh_block_t *blk;
	unsigned long s, e, m;
	int r;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (IS_ERR_VALUE(e) || (e < s))
		return -ERANGE;

	/* Take final values */
	start = s;
	size = e - s;

	/* Grow the blocks, if needed */
	r = assure_empty(info, 1);
	if (r < 0)
		return r;

	blk = get_slot(info);
	blk->start = start;
	blk->size = size;
	blk->owner = NULL;

	attach_free_block(info, blk);

	return 0;
}
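/*
 * Usage sketch (illustrative only, compiled out): setting up a heap over a
 * remote region.  The alignment, offset, and size below are made-up values;
 * a real caller would pass the offset range of its device memory.  Note the
 * heap never dereferences the range, it only does the bookkeeping.
 */
#if 0
static int demo_setup(void)
{
	rh_info_t *info;

	/* Manage offsets with 8-byte granularity */
	info = rh_create(8);
	if (IS_ERR(info))
		return PTR_ERR(info);

	/* Hand the heap a region; no memory behind it is touched */
	return rh_attach_region(info, 0, 0x4000);
}
#endif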
/* Detach the given address range, splitting the containing free block
 * if needed. */
unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 1) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* the back free fragment */
		newblk = get_slot(info);
		newblk->start = e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return s;
}

/* Allocate a block of memory at the specified alignment.  The value returned
 * is an offset into the buffer initialized by rh_init(), or a negative number
 * if there is an error.
 */
unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk;
	rh_block_t *newblk;
	unsigned long start, sp_size;

	/* Validate size, and alignment must be power of two */
	if (size <= 0 || (alignment & (alignment - 1)) != 0)
		return (unsigned long) -EINVAL;

	/* Align to configured alignment */
	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);

	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		if (size <= blk->size) {
			start = (blk->start + alignment - 1) & ~(alignment - 1);
			if (start + size <= blk->start + blk->size)
				break;
		}
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Just fits */
	if (blk->size == size) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		newblk = blk;
	} else {
		/* Fragment caused, split if needed */
		/* Create block for fragment in the beginning */
		sp_size = start - blk->start;
		if (sp_size) {
			rh_block_t *spblk;

			spblk = get_slot(info);
			spblk->start = blk->start;
			spblk->size = sp_size;
			/* add before the blk */
			list_add(&spblk->list, blk->list.prev);
		}
		newblk = get_slot(info);
		newblk->start = start;
		newblk->size = size;

		/* blk still in free list, with updated start and size
		 * for fragment in the end */
		blk->start = start + size;
		blk->size -= sp_size + size;
		/* No fragment in the end, remove blk */
		if (blk->size == 0) {
			list_del(&blk->list);
			release_slot(info, blk);
		}
	}

	newblk->owner = owner;
	attach_taken_block(info, newblk);

	return start;
}
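/*
 * Usage sketch (illustrative only, compiled out): the offsets returned by
 * rh_alloc_align() are plain unsigned longs, so errors come back as negative
 * values cast to unsigned long and must be checked with IS_ERR_VALUE()
 * rather than IS_ERR().  The size, alignment, and owner tag are arbitrary.
 */
#if 0
static unsigned long demo_alloc(rh_info_t *info)
{
	unsigned long off;

	/* 256 bytes, aligned to a 64-byte boundary, tagged for rh_dump() */
	off = rh_alloc_align(info, 256, 64, "demo");
	if (IS_ERR_VALUE(off))
		return off;	/* -EINVAL or -ENOMEM */

	return off;
}
#endif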
/* Allocate a block of memory at the default alignment.  The value returned is
 * an offset into the buffer initialized by rh_init(), or a negative number if
 * there is an error.
 */
unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
{
	return rh_alloc_align(info, size, info->alignment, owner);
}

/* Allocate a block of memory at the given offset, rounded up to the default
 * alignment.  The value returned is an offset into the buffer initialized by
 * rh_init(), or a negative number if there is an error.
 */
unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk, *newblk1, *newblk2;
	unsigned long s, e, m, bs = 0, be = 0;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		/* Reset, so an unsuccessful search leaves blk == NULL */
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;

		start = blk->start;
		attach_taken_block(info, blk);

		return start;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk2 = get_slot(info);
		newblk2->start = e;
		newblk2->size = be - e;

		list_add(&newblk2->list, &blk->list);
	}

	newblk1 = get_slot(info);
	newblk1->start = s;
	newblk1->size = e - s;
	newblk1->owner = owner;

	start = newblk1->start;
	attach_taken_block(info, newblk1);

	return start;
}
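/*
 * Usage sketch (illustrative only, compiled out): reserving a known offset,
 * e.g. a descriptor ring that must live at a fixed place inside the managed
 * region.  The offset, size, and owner tag are made-up values, and
 * demo_fixed() is a hypothetical helper.
 */
#if 0
static int demo_fixed(rh_info_t *info)
{
	unsigned long off;

	off = rh_alloc_fixed(info, 0x1000, 0x200, "ring0");
	if (IS_ERR_VALUE(off))
		return (int) off;

	/* ... use the range, then return it to the free list ... */
	rh_free(info, off);
	return 0;
}
#endif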
/* Deallocate the memory previously allocated by one of the rh_alloc functions.
 * The return value is the size of the deallocated block, or a negative number
 * if there is an error.
 */
int rh_free(rh_info_t * info, unsigned long start)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	/* Remove from taken list */
	list_del(&blk->list);

	/* Get size of freed block */
	size = blk->size;
	attach_free_block(info, blk);

	return size;
}

int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
	rh_block_t *blk;
	struct list_head *l;
	struct list_head *h;
	int nr;

	switch (what) {

	case RHGS_FREE:
		h = &info->free_list;
		break;

	case RHGS_TAKEN:
		h = &info->taken_list;
		break;

	default:
		return -EINVAL;
	}

	/* Linear search for block */
	nr = 0;
	list_for_each(l, h) {
		blk = list_entry(l, rh_block_t, list);
		if (stats != NULL && nr < max_stats) {
			stats->start = blk->start;
			stats->size = blk->size;
			stats->owner = blk->owner;
			stats++;
		}
		nr++;
	}

	return nr;
}

int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	blk->owner = owner;
	size = blk->size;

	return size;
}

void rh_dump(rh_info_t * info)
{
	static rh_stats_t st[32];	/* XXX maximum 32 blocks */
	int maxnr;
	int i, nr;

	maxnr = ARRAY_SIZE(st);

	printk(KERN_INFO
	       "info @0x%p (%d slots empty / %d max)\n",
	       info, info->empty_slots, info->max_blocks);

	printk(KERN_INFO "  Free:\n");
	nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%lx-0x%lx (%u)\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size);
	printk(KERN_INFO "\n");

	printk(KERN_INFO "  Taken:\n");
	nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%lx-0x%lx (%u) %s\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
	printk(KERN_INFO "\n");
}

void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
	printk(KERN_INFO
	       "blk @0x%p: 0x%lx-0x%lx (%u)\n",
	       blk, blk->start, blk->start + blk->size, blk->size);
}
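/*
 * Usage sketch (illustrative only, compiled out): rh_get_stats() always
 * returns the total number of blocks, even when it exceeds max_stats, so a
 * caller can count with a first pass and fill a buffer with a second; this
 * is also why rh_dump() clamps nr to maxnr before printing.
 * demo_count_taken() is a hypothetical helper.
 */
#if 0
static int demo_count_taken(rh_info_t *info)
{
	/* Pass 0/NULL to count only, without copying any entries */
	return rh_get_stats(info, RHGS_TAKEN, 0, NULL);
}
#endif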