// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 * Copyright (c) 2018, Ramon Fried <ramon.fried@gmail.com>
 */

#include <common.h>
#include <errno.h>
#include <dm.h>
#include <dm/of_access.h>
#include <dm/of_addr.h>
#include <asm/io.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <smem.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap, a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), that is found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets. The header of
 * cached items comes after the data.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
 * for the global heap. A new global partition is created from the global heap
 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
 * set by the bootloader.
 */
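
/*
 * Illustration only, not to scale: one private partition laid out as
 * described above. Uncached items grow upward from the partition header,
 * cached items grow downward from the end of the partition, and a cached
 * item's header sits above (after) its data.
 *
 *	+-----------------------------+  phdr + size
 *	| first cached item header    |
 *	| first cached item data      |
 *	|             ...             |
 *	+-----------------------------+  phdr + offset_free_cached
 *	|          free space         |
 *	+-----------------------------+  phdr + offset_free_uncached
 *	|             ...             |
 *	| first uncached item data    |
 *	| first uncached item header  |
 *	+-----------------------------+  phdr + sizeof(*phdr)
 *	| partition header            |
 *	+-----------------------------+  phdr
 */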

/*
 * The version member of the smem header contains an array of versions for the
 * various software components in the SoC. We verify that the boot loader
 * version is a valid version as a sanity check.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_GLOBAL_HEAP_VERSION	11
#define SMEM_GLOBAL_PART_VERSION	12

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST	0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		10

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc

/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @cacheline:	alignment for "cached" entries
 * @reserved:	reserved entries for future use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	reserved entries for future use
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	reserved entries for future use
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	reserved entry for future use
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5

/**
 * struct smem_info - smem region info located after the table of contents
 * @magic:	magic number, must be SMEM_INFO_MAGIC
 * @size:	size of the smem region
 * @base_addr:	base address of the smem region
 * @reserved:	reserved entry for future use
 * @num_items:	highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* "SIII" */

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	u32 aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @global_partition: pointer to global partition when in use
 * @global_cacheline: cacheline size for global partition
 * @partitions:	list of pointers to partitions affecting the current
 *		processor/host
 * @cacheline:	list of cacheline sizes for each host
 * @item_count:	max accepted item number
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct udevice *dev;

	struct smem_partition_header *global_partition;
	size_t global_cacheline;
	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
	size_t cacheline[SMEM_HOST_COUNT];
	u32 item_count;

	unsigned int num_regions;
	struct smem_region regions[0];
};

static struct smem_private_entry *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr,
					size_t cacheline)
{
	void *p = phdr;
	struct smem_private_entry *e;

	/* The first cached entry header sits at the very end of the
	 * partition, aligned down to the partition's cacheline size.
	 */
	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
}

static void *phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}
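
/*
 * Illustration only, derived from struct smem_private_entry above: the byte
 * layout the entry walkers below assume for one uncached item. For cached
 * items the same header is instead placed after (above) the data, which is
 * why cached_entry_next()/cached_entry_to_item() subtract offsets.
 *
 *	+------------------------+
 *	| data padding           |  padding_data bytes
 *	| item data              |  size - padding_data bytes
 *	| header padding         |  padding_hdr bytes
 *	| smem_private_entry     |  canary, item, size, ...
 *	+------------------------+  <- entry pointer
 */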

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);
}

static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   unsigned int item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	size_t alloc_size;
	void *cached;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY) {
			dev_err(smem->dev,
				"Found invalid canary in hosts %d:%d partition\n",
				phdr->host0, phdr->host1);
			return -EINVAL;
		}

		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size >= cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	dmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned int item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	dmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 */
static int qcom_smem_alloc(unsigned int host, unsigned int item, size_t size)
{
	struct smem_partition_header *phdr;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	return ret;
}

static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned int item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *area;
	struct smem_global_entry *entry;
	u32 aux_base;
	unsigned int i;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		area = &smem->regions[i];

		if (area->aux_base == aux_base || !aux_base) {
			if (size != NULL)
				*size = le32_to_cpu(entry->size);
			return area->virt_base + le32_to_cpu(entry->offset);
		}
	}

	return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   size_t cacheline,
				   unsigned int item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return uncached_entry_to_item(e);
		}

		e = uncached_entry_next(e);
	}

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, cacheline);
	end = phdr_to_last_cached_entry(phdr);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return cached_entry_to_item(e);
		}

		e = cached_entry_next(e, cacheline);
	}

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %d:%d partition\n",
		phdr->host0, phdr->host1);

	return ERR_PTR(-EINVAL);
}
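
/*
 * Note: the lookup helpers above and qcom_smem_get() below return ERR_PTR()
 * encoded errors (-ENXIO, -ENOENT, -EINVAL, ...), never NULL, so callers
 * must check the result with IS_ERR()/PTR_ERR() rather than comparing
 * against NULL.
 */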

/**
 * qcom_smem_get() - resolve the pointer of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @size:	pointer to be filled out with size of the item
 *
 * Looks up the smem item and returns a pointer to it. The size of the smem
 * item is returned in @size.
 */
static void *qcom_smem_get(unsigned int host, unsigned int item, size_t *size)
{
	struct smem_partition_header *phdr;
	size_t cacheln;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	if (WARN_ON(item >= __smem->item_count))
		return ERR_PTR(-EINVAL);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		cacheln = __smem->cacheline[host];
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		cacheln = __smem->global_cacheline;
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	return ptr;
}

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:	the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 */
static int qcom_smem_get_free_space(unsigned int host)
{
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);
	}

	return ret;
}

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	struct smem_header *header;
	__le32 *versions;

	header = smem->regions[0].virt_base;
	versions = header->version;

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	u32 version;

	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %d\n", version);
		return ERR_PTR(-EINVAL);
	}

	return ptable;
}

static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	struct smem_info *info;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR_OR_NULL(ptable))
		return SMEM_ITEM_COUNT;

	/* The optional smem_info block follows the last ptable entry */
	info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
		return SMEM_ITEM_COUNT;

	return le16_to_cpu(info->num_items);
}
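
/*
 * Illustration only: the version word read by qcom_smem_get_sbl_version()
 * packs the heap layout revision in its upper 16 bits, which is how
 * qcom_smem_probe() below selects between the two supported procedures:
 *
 *	version = qcom_smem_get_sbl_version(smem);
 *	switch (version >> 16) {
 *	case SMEM_GLOBAL_PART_VERSION:	v12, partitioned global heap
 *	case SMEM_GLOBAL_HEAP_VERSION:	v11, plain global heap
 *	}
 */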

static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry = NULL;
	struct smem_ptable *ptable;
	u32 host0, host1, size;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);

		if (host0 == SMEM_GLOBAL_HOST && host0 == host1)
			break;
	}

	/* The loop ran to completion, so no matching entry was found */
	if (i == le32_to_cpu(ptable->num_entries)) {
		dev_err(smem->dev, "Missing entry for global partition\n");
		return -EINVAL;
	}

	if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "Invalid entry for global partition\n");
		return -EINVAL;
	}

	if (smem->global_partition) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}

	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
	host0 = le16_to_cpu(header->host0);
	host1 = le16_to_cpu(header->host1);

	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "Global partition has invalid magic\n");
		return -EINVAL;
	}

	if (host0 != SMEM_GLOBAL_HOST && host1 != SMEM_GLOBAL_HOST) {
		dev_err(smem->dev, "Global partition hosts are invalid\n");
		return -EINVAL;
	}

	if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "Global partition has invalid size\n");
		return -EINVAL;
	}

	size = le32_to_cpu(header->offset_free_uncached);
	if (size > le32_to_cpu(header->size)) {
		dev_err(smem->dev,
			"Global partition has invalid free pointer\n");
		return -EINVAL;
	}

	smem->global_partition = header;
	smem->global_cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}

static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
					  unsigned int local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned int remote_host;
	u32 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);

		if (host0 != local_host && host1 != local_host)
			continue;

		if (!le32_to_cpu(entry->offset))
			continue;

		if (!le32_to_cpu(entry->size))
			continue;

		if (host0 == local_host)
			remote_host = host1;
		else
			remote_host = host0;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev,
				"Invalid remote host %d\n",
				remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host]) {
			dev_err(smem->dev,
				"Already found a partition for host %d\n",
				remote_host);
			return -EINVAL;
		}

		header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
		host0 = le16_to_cpu(header->host0);
		host1 = le16_to_cpu(header->host1);

		if (memcmp(header->magic, SMEM_PART_MAGIC,
			   sizeof(header->magic))) {
			dev_err(smem->dev,
				"Partition %d has invalid magic\n", i);
			return -EINVAL;
		}

		if (host0 != local_host && host1 != local_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (host0 != remote_host && host1 != remote_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
			dev_err(smem->dev,
				"Partition %d has invalid size\n", i);
			return -EINVAL;
		}
		if (le32_to_cpu(header->offset_free_uncached) >
		    le32_to_cpu(header->size)) {
			dev_err(smem->dev,
				"Partition %d has invalid free pointer\n", i);
			return -EINVAL;
		}

		smem->partitions[remote_host] = header;
		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
	}

	return 0;
}

static int qcom_smem_map_memory(struct qcom_smem *smem, struct udevice *dev,
				const char *name, int i)
{
	struct fdt_resource r;
	int ret;
	int node = dev_of_offset(dev);

	ret = fdtdec_lookup_phandle(gd->fdt_blob, node, name);
	if (ret < 0) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = fdt_get_resource(gd->fdt_blob, ret, "reg", 0, &r);
	if (ret)
		return ret;

	smem->regions[i].aux_base = (u32)r.start;
	smem->regions[i].size = fdt_resource_size(&r);
	smem->regions[i].virt_base = devm_ioremap(dev, r.start,
						  fdt_resource_size(&r));
	if (!smem->regions[i].virt_base)
		return -ENOMEM;

	return 0;
}

static int qcom_smem_probe(struct udevice *dev)
{
	struct smem_header *header;
	struct qcom_smem *smem;
	size_t array_size;
	int num_regions;
	u32 version;
	int ret;
	int node = dev_of_offset(dev);

	num_regions = 1;
	if (fdtdec_lookup_phandle(gd->fdt_blob, node, "qcom,rpm-msg-ram") >= 0)
		num_regions++;

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = dev;
	smem->num_regions = num_regions;

	ret = qcom_smem_map_memory(smem, dev, "memory-region", 0);
	if (ret)
		return ret;

	if (num_regions > 1) {
		ret = qcom_smem_map_memory(smem, dev, "qcom,rpm-msg-ram", 1);
		if (ret)
			return ret;
	}

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	version = qcom_smem_get_sbl_version(smem);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	__smem = smem;

	return 0;
}

static int qcom_smem_remove(struct udevice *dev)
{
	__smem = NULL;

	return 0;
}

static const struct udevice_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{ }
};

static const struct smem_ops msm_smem_ops = {
	.alloc = qcom_smem_alloc,
	.get = qcom_smem_get,
	.get_free_space = qcom_smem_get_free_space,
};

U_BOOT_DRIVER(qcom_smem) = {
	.name = "qcom_smem",
	.id = UCLASS_SMEM,
	.of_match = qcom_smem_of_match,
	.ops = &msm_smem_ops,
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
};
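
/*
 * Illustration only, not part of the driver: a sketch of client usage
 * through the smem uclass, assuming the wrapper helpers declared in
 * include/smem.h (smem_alloc(), smem_get(), smem_get_free_space()). The
 * item number 100 is hypothetical; passing -1 as host selects the global
 * heap, as described for qcom_smem_alloc() above.
 *
 *	struct udevice *dev;
 *	size_t size;
 *	void *ptr;
 *	int ret;
 *
 *	ret = uclass_get_device(UCLASS_SMEM, 0, &dev);
 *	if (ret)
 *		return ret;
 *
 *	ret = smem_alloc(dev, -1, 100, 64);
 *	if (ret && ret != -EEXIST)
 *		return ret;
 *
 *	ptr = smem_get(dev, -1, 100, &size);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 */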