/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Simple allocate-only memory allocator.  Used to allocate memory at
 * application start time.
 */

#include <linux/kernel.h>

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-spinlock.h>
#include <asm/octeon/cvmx-bootmem.h>

/*#define DEBUG */


static struct cvmx_bootmem_desc *cvmx_bootmem_desc;

/* See header file for descriptions of functions */

/*
 * Wrapper functions are provided for reading/writing the size and
 * next block values as these may not be directly addressable (in 32
 * bit applications, for instance).  Offsets of data elements in the
 * bootmem list must match cvmx_bootmem_block_header_t.
 */
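
/*
 * For reference, the offsets below correspond to a free-block header of
 * two consecutive uint64_t values: the physical address of the next free
 * block (0 if last) at offset 0, followed by the size of this free block
 * in bytes at offset 8.  The field names in the sketch are illustrative;
 * the authoritative layout is cvmx_bootmem_block_header_t in
 * cvmx-bootmem.h.
 *
 *	struct cvmx_bootmem_block_header {
 *		uint64_t next_block_addr;
 *		uint64_t size;
 *	};
 */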
#define NEXT_OFFSET 0
#define SIZE_OFFSET 8

static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
{
	cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
}

static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
{
	cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
}

static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr)
{
	return cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63));
}

static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
{
	return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63));
}

void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
			       uint64_t min_addr, uint64_t max_addr)
{
	int64_t address;
	address =
	    cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);

	if (address > 0)
		return cvmx_phys_to_ptr(address);
	else
		return NULL;
}

void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
				 uint64_t alignment)
{
	return cvmx_bootmem_alloc_range(size, alignment, address,
					address + size);
}

void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
{
	return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
}

int cvmx_bootmem_free_named(char *name)
{
	return cvmx_bootmem_phy_named_block_free(name, 0);
}

struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
{
	return cvmx_bootmem_phy_named_block_find(name, 0);
}

void cvmx_bootmem_lock(void)
{
	cvmx_spinlock_lock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
}

void cvmx_bootmem_unlock(void)
{
	cvmx_spinlock_unlock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
}

int cvmx_bootmem_init(void *mem_desc_ptr)
{
	/* Here we set the global pointer to the bootmem descriptor
	 * block.  This pointer will be used directly, so we will set
	 * it up to be directly usable by the application.  It is set
	 * up as follows for the various runtime/ABI combinations:
	 *
	 * Linux 64 bit: set the XKPHYS bit
	 * Linux 32 bit: use mmap to create a mapping, use the virtual address
	 * CVMX 64 bit:  use the physical address directly
	 * CVMX 32 bit:  use the physical address directly
	 *
	 * Note that the CVMX environment assumes the use of 1-1 TLB
	 * mappings so that the physical addresses can be used
	 * directly.
	 */
	if (!cvmx_bootmem_desc) {
#if defined(CVMX_ABI_64)
		/* Set XKPHYS bit */
		cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
#else
		cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
#endif
	}

	return 0;
}

/*
 * The cvmx_bootmem_phy* functions below return 64 bit physical
 * addresses, and expose more features than the cvmx_bootmem functions
 * above.  These are required for full memory space access in 32 bit
 * applications, as well as for using some advanced features.  Most
 * applications should not need to use these.
 */
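
/*
 * Illustrative usage sketch of the physical allocator (not called from
 * this file; the constraint values are examples only):
 *
 *	int64_t pa = cvmx_bootmem_phy_alloc(0x100000, 0, 0xffffffffull,
 *					    0x100000, 0);
 *	void *buf = (pa > 0) ? cvmx_phys_to_ptr(pa) : NULL;
 *
 * This requests a 1 MB block, aligned to 1 MB, anywhere below 4 GB.  Most
 * callers should prefer the cvmx_bootmem_alloc*() wrappers above, which
 * perform the physical-to-virtual conversion themselves.
 */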

int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
			       uint64_t address_max, uint64_t alignment,
			       uint32_t flags)
{

	uint64_t head_addr;
	uint64_t ent_addr;
	/* points to the previous list entry; 0 when the current entry is the head of the list */
	uint64_t prev_addr = 0;
	uint64_t new_ent_addr = 0;
	uint64_t desired_min_addr;

#ifdef DEBUG
	cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
		     "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
		     (unsigned long long)req_size,
		     (unsigned long long)address_min,
		     (unsigned long long)address_max,
		     (unsigned long long)alignment);
#endif

	if (cvmx_bootmem_desc->major_version > 3) {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
			     "version: %d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
		goto error_out;
	}

	/*
	 * Do a variety of checks to validate the arguments.  The
	 * allocator code will later assume that these checks have
	 * been made.  We validate that the requested constraints are
	 * not self-contradictory before we look through the list of
	 * available memory.
	 */

	/* 0 is not a valid req_size for this allocator */
	if (!req_size)
		goto error_out;

	/* Round req_size up to a multiple of the minimum alignment size */
	req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
		~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);

	/*
	 * Convert a non-zero address_min with a zero address_max into the
	 * special case of a range that specifies an exact memory block to
	 * allocate.  Do this before the other checks and adjustments so
	 * that this transformation will be validated.
	 */
	if (address_min && !address_max)
		address_max = address_min + req_size;
	else if (!address_min && !address_max)
		address_max = ~0ull;	/* If no limits given, use max limits */


	/*
	 * Enforce the minimum alignment (this also keeps the minimum free
	 * block req_size the same as the alignment req_size).
	 */
	if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
		alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;

	/*
	 * Adjust the address minimum based on the requested alignment
	 * (round up to meet alignment).  Do this here so we can reject
	 * impossible requests up front. (NOP for address_min == 0)
	 */
	if (alignment)
		address_min = __ALIGN_MASK(address_min, (alignment - 1));
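
	/*
	 * Worked example of the adjustments above (values are illustrative
	 * and assume CVMX_BOOTMEM_ALIGNMENT_SIZE is 16): a call with
	 * req_size = 0x90, address_min = 0x1008, address_max = 0 and
	 * alignment = 0 keeps req_size at 0x90, sets address_max to 0x1098
	 * (address_min + req_size), raises alignment to 16 and rounds
	 * address_min up to 0x1010.  The consistency check below then
	 * rejects the request, since 0x90 no longer fits between 0x1010
	 * and 0x1098.
	 */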

	/*
	 * Reject inconsistent args.  We have adjusted these, so this
	 * may fail due to our internal changes even if this check
	 * would pass for the values the user supplied.
	 */
	if (req_size > address_max - address_min)
		goto error_out;

	/* Walk through the list entries - the first fit found is returned */

	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_lock();
	head_addr = cvmx_bootmem_desc->head_addr;
	ent_addr = head_addr;
	for (; ent_addr;
	     prev_addr = ent_addr,
	     ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
		uint64_t usable_base, usable_max;
		uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);

		if (cvmx_bootmem_phy_get_next(ent_addr)
		    && ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
			cvmx_dprintf("Internal bootmem_alloc() error: ent: "
				     "0x%llx, next: 0x%llx\n",
				     (unsigned long long)ent_addr,
				     (unsigned long long)
				     cvmx_bootmem_phy_get_next(ent_addr));
			goto error_out;
		}

		/*
		 * Determine if this is an entry that can satisfy the
		 * request: check that the entry is large enough.
		 */
		usable_base =
		    __ALIGN_MASK(max(address_min, ent_addr), alignment - 1);
		usable_max = min(address_max, ent_addr + ent_size);
		/*
		 * We should be able to allocate the block at address
		 * usable_base.
		 */

		desired_min_addr = usable_base;
		/*
		 * Determine if the request can be satisfied from the
		 * current entry.
		 */
		if (!((ent_addr + ent_size) > usable_base
		      && ent_addr < address_max
		      && req_size <= usable_max - usable_base))
			continue;
		/*
		 * We have found an entry that has room to satisfy the
		 * request, so allocate it from this entry.  If
		 * CVMX_BOOTMEM_FLAG_END_ALLOC is set, then allocate from
		 * the end of this block rather than the beginning.
		 */
		if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) {
			desired_min_addr = usable_max - req_size;
			/*
			 * Align the desired address down to the required
			 * alignment.
			 */
			desired_min_addr &= ~(alignment - 1);
		}

		/* Match at start of entry */
		if (desired_min_addr == ent_addr) {
			if (req_size < ent_size) {
				/*
				 * big enough to create a new block
				 * from the top portion of the block.
				 */
				new_ent_addr = ent_addr + req_size;
				cvmx_bootmem_phy_set_next(new_ent_addr,
					cvmx_bootmem_phy_get_next(ent_addr));
				cvmx_bootmem_phy_set_size(new_ent_addr,
							  ent_size -
							  req_size);

				/*
				 * Adjust next pointer as following
				 * code uses this.
				 */
				cvmx_bootmem_phy_set_next(ent_addr,
							  new_ent_addr);
			}

			/*
			 * adjust prev ptr or head to remove this
			 * entry from the list.
			 */
			if (prev_addr)
				cvmx_bootmem_phy_set_next(prev_addr,
					cvmx_bootmem_phy_get_next(ent_addr));
			else
				/*
				 * head of list being returned, so
				 * update head ptr.
				 */
				cvmx_bootmem_desc->head_addr =
					cvmx_bootmem_phy_get_next(ent_addr);

			if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
				cvmx_bootmem_unlock();
			return desired_min_addr;
		}
		/*
		 * The block being returned doesn't start at the beginning
		 * of the entry, so we know that we will be splitting a
		 * block off the front of this one.  Create a new block
		 * from the beginning, add it to the list, and go to the
		 * top of the loop again.
		 *
		 * Create the new block from the high portion of the entry,
		 * so that the top block starts at the desired address.
		 */
		new_ent_addr = desired_min_addr;
		cvmx_bootmem_phy_set_next(new_ent_addr,
					  cvmx_bootmem_phy_get_next
					  (ent_addr));
		cvmx_bootmem_phy_set_size(new_ent_addr,
					  cvmx_bootmem_phy_get_size
					  (ent_addr) -
					  (desired_min_addr -
					   ent_addr));
		cvmx_bootmem_phy_set_size(ent_addr,
					  desired_min_addr - ent_addr);
		cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
		/* Loop again to handle the actual alloc from the new block */
	}
error_out:
	/* We didn't find anything, so return error */
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_unlock();
	return -1;
}

int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
{
	uint64_t cur_addr;
	uint64_t prev_addr = 0;	/* zero is invalid */
	int retval = 0;

#ifdef DEBUG
	cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n",
		     (unsigned long long)phy_addr, (unsigned long long)size);
#endif
	if (cvmx_bootmem_desc->major_version > 3) {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
			     "version: %d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
		return 0;
	}

	/* 0 is not a valid size for this allocator */
	if (!size)
		return 0;

	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_lock();
	cur_addr = cvmx_bootmem_desc->head_addr;
	if (cur_addr == 0 || phy_addr < cur_addr) {
		/* add at front of list - special case with changing head ptr */
		if (cur_addr && phy_addr + size > cur_addr)
			goto bootmem_free_done; /* error, overlapping section */
		else if (phy_addr + size == cur_addr) {
			/* Add to front of existing first block */
			cvmx_bootmem_phy_set_next(phy_addr,
						  cvmx_bootmem_phy_get_next
						  (cur_addr));
			cvmx_bootmem_phy_set_size(phy_addr,
						  cvmx_bootmem_phy_get_size
						  (cur_addr) + size);
			cvmx_bootmem_desc->head_addr = phy_addr;

		} else {
			/* New block before first block.  OK if cur_addr is 0 */
			cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
			cvmx_bootmem_phy_set_size(phy_addr, size);
			cvmx_bootmem_desc->head_addr = phy_addr;
		}
		retval = 1;
		goto bootmem_free_done;
	}

	/* Find the place in the list to add the block */
	while (cur_addr && phy_addr > cur_addr) {
		prev_addr = cur_addr;
		cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
	}

	if (!cur_addr) {
		/*
		 * We have reached the end of the list, add on to the end,
		 * checking to see if we need to combine with the last
		 * block.
		 */
		if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
		    phy_addr) {
			cvmx_bootmem_phy_set_size(prev_addr,
						  cvmx_bootmem_phy_get_size
						  (prev_addr) + size);
		} else {
			cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
			cvmx_bootmem_phy_set_size(phy_addr, size);
			cvmx_bootmem_phy_set_next(phy_addr, 0);
		}
		retval = 1;
		goto bootmem_free_done;
	} else {
		/*
		 * Insert between the prev and cur nodes, checking for a
		 * merge with either/both.
		 */
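		/*
		 * Illustrative picture of the cases handled below (addresses
		 * are made up; each range is [start, end)):
		 *
		 *	prev [0x1000, 0x2000), freed [0x2000, 0x3000), cur at 0x4000: merge with prev only
		 *	prev [0x1000, 0x1800), freed [0x2000, 0x3000), cur at 0x3000: merge with cur only
		 *	prev [0x1000, 0x2000), freed [0x2000, 0x3000), cur at 0x3000: merge with both
		 *	otherwise the freed range becomes a standalone block
		 */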
		if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
		    phy_addr) {
			/* Merge with previous */
			cvmx_bootmem_phy_set_size(prev_addr,
						  cvmx_bootmem_phy_get_size
						  (prev_addr) + size);
			if (phy_addr + size == cur_addr) {
				/* Also merge with current */
				cvmx_bootmem_phy_set_size(prev_addr,
					cvmx_bootmem_phy_get_size(cur_addr) +
					cvmx_bootmem_phy_get_size(prev_addr));
				cvmx_bootmem_phy_set_next(prev_addr,
					cvmx_bootmem_phy_get_next(cur_addr));
			}
			retval = 1;
			goto bootmem_free_done;
		} else if (phy_addr + size == cur_addr) {
			/* Merge with current */
			cvmx_bootmem_phy_set_size(phy_addr,
						  cvmx_bootmem_phy_get_size
						  (cur_addr) + size);
			cvmx_bootmem_phy_set_next(phy_addr,
						  cvmx_bootmem_phy_get_next
						  (cur_addr));
			cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
			retval = 1;
			goto bootmem_free_done;
		}

		/* It is a standalone block, add in between prev and cur */
		cvmx_bootmem_phy_set_size(phy_addr, size);
		cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
		cvmx_bootmem_phy_set_next(prev_addr, phy_addr);

	}
	retval = 1;

bootmem_free_done:
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_unlock();
	return retval;

}

struct cvmx_bootmem_named_block_desc *
cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
{
	unsigned int i;
	struct cvmx_bootmem_named_block_desc *named_block_array_ptr;

#ifdef DEBUG
	cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
#endif
	/*
	 * Lock the structure to make sure that it is not being
	 * changed while we are examining it.
	 */
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_lock();

	/* Use XKPHYS for 64 bit linux */
	named_block_array_ptr = (struct cvmx_bootmem_named_block_desc *)
	    cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);

#ifdef DEBUG
	cvmx_dprintf
	    ("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n",
	     named_block_array_ptr);
#endif
	if (cvmx_bootmem_desc->major_version == 3) {
		for (i = 0;
		     i < cvmx_bootmem_desc->named_block_num_blocks; i++) {
			if ((name && named_block_array_ptr[i].size
			     && !strncmp(name, named_block_array_ptr[i].name,
					 cvmx_bootmem_desc->named_block_name_len
					 - 1))
			    || (!name && !named_block_array_ptr[i].size)) {
				if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
					cvmx_bootmem_unlock();

				return &(named_block_array_ptr[i]);
			}
		}
	} else {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
			     "version: %d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
	}
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_unlock();

	return NULL;
}

int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
{
	struct cvmx_bootmem_named_block_desc *named_block_ptr;

	if (cvmx_bootmem_desc->major_version != 3) {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
			     "%d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
		return 0;
	}
#ifdef DEBUG
	cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
#endif

	/*
	 * Take the lock here, as the name lookup, block free and name free
	 * need to be atomic.
	 */
	cvmx_bootmem_lock();

	named_block_ptr =
	    cvmx_bootmem_phy_named_block_find(name,
					      CVMX_BOOTMEM_FLAG_NO_LOCKING);
	if (named_block_ptr) {
#ifdef DEBUG
		cvmx_dprintf("cvmx_bootmem_phy_named_block_free: "
			     "%s, base: 0x%llx, size: 0x%llx\n",
			     name,
			     (unsigned long long)named_block_ptr->base_addr,
			     (unsigned long long)named_block_ptr->size);
#endif
		__cvmx_bootmem_phy_free(named_block_ptr->base_addr,
					named_block_ptr->size,
					CVMX_BOOTMEM_FLAG_NO_LOCKING);
		/* Set size to zero to indicate the block is not used. */
		named_block_ptr->size = 0;
	}

	cvmx_bootmem_unlock();
	return named_block_ptr != NULL;	/* 0 on failure, 1 on success */
}