/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap, a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that
 * identifies the partition and holds properties for the two internal memory
 * regions. The two regions are cached and non-cached memory respectively.
 * Each region contains a linked list of allocation headers
 * (@smem_private_entry) followed by their data.
 *
 * Items in the non-cached region are allocated from the start of the
 * partition while items in the cached region are allocated from the end. The
 * free area is hence the region between the cached and non-cached offsets.
 * The header of a cached item comes after its data. A sketch of this layout
 * follows below.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
 * for the global heap. A new global partition is created from the global heap
 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count
 * is set by the bootloader.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock
 * must be held - currently lock number 3 of the sfpb or tcsr is used for this
 * on all platforms.
 */
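/*
 * Illustrative layout of one private partition, based on the description
 * above (not to scale; field names refer to @smem_partition_header):
 *
 *	+------------------------------+  <- start of partition
 *	| smem_partition_header        |
 *	+------------------------------+
 *	| entry hdr | padding | data   |  uncached items, allocated
 *	| entry hdr | padding | data   |  upwards from the start
 *	+------------------------------+  <- offset_free_uncached
 *	|                              |
 *	|          free space          |
 *	|                              |
 *	+------------------------------+  <- offset_free_cached
 *	| data | entry hdr             |  cached items, allocated
 *	| data | entry hdr             |  downwards from the end
 *	+------------------------------+  <- size
 */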
/*
 * The version member of the smem header contains an array of versions for the
 * various software components in the SoC. We verify that the boot loader
 * version is a valid version as a sanity check.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_GLOBAL_HEAP_VERSION	11
#define SMEM_GLOBAL_PART_VERSION	12

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST	0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		10

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command: current command to be executed
 * @status: status of the currently requested command
 * @params: parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated: boolean to indicate if this entry is used
 * @offset: offset to the allocated space
 * @size: size of the allocated space, 8 byte aligned
 * @aux_base: base address for the memory region used by this unit, or 0 for
 *	      the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc
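/*
 * A global TOC entry resolves to an address by combining the memory region
 * matching the masked aux_base with the entry's offset. A minimal sketch,
 * mirroring what qcom_smem_get_global() does further down; region_for() is a
 * hypothetical stand-in for the lookup over smem->regions:
 *
 *	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
 *	ptr = region_for(aux_base)->virt_base + le32_to_cpu(entry->offset);
 */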
/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm: proc_comm communication interface (legacy)
 * @version: array of versions for the various subsystems
 * @initialized: boolean to indicate that smem is initialized
 * @free_offset: index of the first unallocated byte in smem
 * @available: number of bytes available for allocation
 * @reserved: reserved field, must be 0
 * @toc: array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset: offset, within the main shared memory region, of the partition
 * @size: size of the partition
 * @flags: flags for the partition (currently unused)
 * @host0: first processor/host with access to this partition
 * @host1: second processor/host with access to this partition
 * @cacheline: alignment for "cached" entries
 * @reserved: reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic: magic number, must be SMEM_PTABLE_MAGIC
 * @version: version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved: for now reserved entries
 * @entry: list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic: magic number, must be SMEM_PART_MAGIC
 * @host0: first processor/host with access to this partition
 * @host1: second processor/host with access to this partition
 * @size: size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *	      this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *	      partition
 * @reserved: for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary: magic number, must be SMEM_PRIVATE_CANARY
 * @item: identifying number of the smem item
 * @size: size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved: for now reserved entry
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5

/**
 * struct smem_info - smem region info located after the table of contents
 * @magic: magic number, must be SMEM_INFO_MAGIC
 * @size: size of the smem region
 * @base_addr: base address of the smem region
 * @reserved: for now reserved entry
 * @num_items: highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* "SIII" */

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base: identifier of aux_mem base
 * @virt_base: virtual base address of memory with this aux_mem identifier
 * @size: size of the memory region
 */
struct smem_region {
	u32 aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev: device pointer
 * @hwlock: reference to a hwspinlock
 * @global_partition: pointer to global partition when in use
 * @global_cacheline: cacheline size for global partition
 * @partitions: list of pointers to partitions affecting the current
 *		processor/host
 * @cacheline: list of cacheline sizes for each host
 * @item_count: max accepted item number
 * @num_regions: number of @regions
 * @regions: list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	struct smem_partition_header *global_partition;
	size_t global_cacheline;
	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
	size_t cacheline[SMEM_HOST_COUNT];
	u32 item_count;

	unsigned num_regions;
	struct smem_region regions[];
};

static void *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static struct smem_private_entry *
phdr_to_first_cached_entry(struct smem_partition_header *phdr,
			   size_t cacheline)
{
	void *p = phdr;
	struct smem_private_entry *e;

	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
}

static void *
phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);
}

static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}
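/*
 * The helpers above compose into list walks. A minimal sketch of the
 * uncached walk, as performed (with canary checks) by the alloc/get
 * routines below:
 *
 *	struct smem_private_entry *e = phdr_to_first_uncached_entry(phdr);
 *	void *end = phdr_to_last_uncached_entry(phdr);
 *
 *	while ((void *)e < end) {
 *		...inspect e->item here...
 *		e = uncached_entry_next(e);
 *	}
 */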
/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   unsigned item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	size_t alloc_size;
	void *cached;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY)
			goto bad_canary;
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size > cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
bad_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return -EINVAL;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host: remote processor id, or -1
 * @item: smem item handle
 * @size: number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL(qcom_smem_alloc);
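/*
 * Typical client usage pairs qcom_smem_alloc() with qcom_smem_get(). A
 * minimal sketch, where MY_SMEM_ITEM and struct foo are hypothetical
 * placeholders for an item number and layout agreed with the remote side:
 *
 *	ret = qcom_smem_alloc(remote_host, MY_SMEM_ITEM, sizeof(struct foo));
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 *
 *	ptr = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 */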
static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *region;
	struct smem_global_entry *entry;
	u32 aux_base;
	unsigned i;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		region = &smem->regions[i];

		if (region->aux_base == aux_base || !aux_base) {
			if (size != NULL)
				*size = le32_to_cpu(entry->size);
			return region->virt_base + le32_to_cpu(entry->offset);
		}
	}

	return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   size_t cacheline,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return uncached_entry_to_item(e);
		}

		e = uncached_entry_next(e);
	}

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, cacheline);
	end = phdr_to_last_cached_entry(phdr);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return cached_entry_to_item(e);
		}

		e = cached_entry_next(e, cacheline);
	}

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return ERR_PTR(-EINVAL);
}

/**
 * qcom_smem_get() - resolve the pointer and size of a smem item
 * @host: the remote processor, or -1
 * @item: smem item handle
 * @size: pointer to be filled out with size of the item
 *
 * Looks up smem item and returns pointer to it. Size of smem
 * item is returned in @size.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	size_t cacheln;
	int ret;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	if (WARN_ON(item >= __smem->item_count))
		return ERR_PTR(-EINVAL);

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ERR_PTR(ret);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		cacheln = __smem->cacheline[host];
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		cacheln = __smem->global_cacheline;
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ptr;
}
EXPORT_SYMBOL(qcom_smem_get);

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host: the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);
	}

	return ret;
}
EXPORT_SYMBOL(qcom_smem_get_free_space);
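/*
 * Since the heaps are allocate-only, a drop in the value returned by
 * qcom_smem_get_free_space() indicates that new items have been allocated.
 * A minimal sketch of such a check, with remote_host as a hypothetical host
 * id and handle_new_allocations() as a hypothetical client helper:
 *
 *	int before = qcom_smem_get_free_space(remote_host);
 *	...
 *	if (qcom_smem_get_free_space(remote_host) != before)
 *		handle_new_allocations();
 */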
/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get())
 * @p: the virtual address to convert
 *
 * Returns 0 if the pointer provided is not within any smem region.
 */
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
	unsigned i;

	for (i = 0; i < __smem->num_regions; i++) {
		struct smem_region *region = &__smem->regions[i];

		if (p < region->virt_base)
			continue;
		if (p < region->virt_base + region->size) {
			u64 offset = p - region->virt_base;

			return (phys_addr_t)region->aux_base + offset;
		}
	}

	return 0;
}
EXPORT_SYMBOL(qcom_smem_virt_to_phys);
static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	struct smem_header *header;
	__le32 *versions;

	header = smem->regions[0].virt_base;
	versions = header->version;

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	u32 version;

	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %u\n", version);
		return ERR_PTR(-EINVAL);
	}
	return ptable;
}

static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	struct smem_info *info;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR_OR_NULL(ptable))
		return SMEM_ITEM_COUNT;

	/* The smem_info block sits directly after the partition table */
	info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
		return SMEM_ITEM_COUNT;

	return le16_to_cpu(info->num_items);
}

/*
 * Validate the partition header for a partition whose partition
 * table entry is supplied. Returns a pointer to its header if
 * valid, or a null pointer otherwise.
 */
static struct smem_partition_header *
qcom_smem_partition_header(struct qcom_smem *smem,
		struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
	struct smem_partition_header *header;
	u32 size;

	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);

	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "bad partition magic %02x %02x %02x %02x\n",
			header->magic[0], header->magic[1],
			header->magic[2], header->magic[3]);
		return NULL;
	}

	if (host0 != le16_to_cpu(header->host0)) {
		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
			host0, le16_to_cpu(header->host0));
		return NULL;
	}
	if (host1 != le16_to_cpu(header->host1)) {
		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
			host1, le16_to_cpu(header->host1));
		return NULL;
	}

	size = le32_to_cpu(header->size);
	if (size != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "bad partition size (%u != %u)\n",
			size, le32_to_cpu(entry->size));
		return NULL;
	}

	if (le32_to_cpu(header->offset_free_uncached) > size) {
		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
			le32_to_cpu(header->offset_free_uncached), size);
		return NULL;
	}

	return header;
}

static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	bool found = false;
	int i;

	if (smem->global_partition) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
			continue;

		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(smem->dev, "Missing entry for global partition\n");
		return -EINVAL;
	}

	header = qcom_smem_partition_header(smem, entry,
				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
	if (!header)
		return -EINVAL;

	smem->global_partition = header;
	smem->global_cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}

static int
qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned int remote_host;
	u16 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);
		if (host0 == local_host)
			remote_host = host1;
		else if (host1 == local_host)
			remote_host = host0;
		else
			continue;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev, "bad host %u\n", remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host]) {
			dev_err(smem->dev, "duplicate host %u\n", remote_host);
			return -EINVAL;
		}

		header = qcom_smem_partition_header(smem, entry, host0, host1);
		if (!header)
			return -EINVAL;

		smem->partitions[remote_host] = header;
		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
	}

	return 0;
}

static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
				const char *name, int i)
{
	struct device_node *np;
	struct resource r;
	resource_size_t size;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;
	size = resource_size(&r);

	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, size);
	if (!smem->regions[i].virt_base)
		return -ENOMEM;
	smem->regions[i].aux_base = (u32)r.start;
	smem->regions[i].size = size;

	return 0;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct qcom_smem *smem;
	size_t array_size;
	int num_regions;
	int hwlock_id;
	u32 version;
	int ret;

	num_regions = 1;
	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
		num_regions++;

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
	if (ret)
		return ret;

	if (num_regions > 1) {
		ret = qcom_smem_map_memory(smem, &pdev->dev,
					   "qcom,rpm-msg-ram", 1);
		if (ret)
			return ret;
	}

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	version = qcom_smem_get_sbl_version(smem);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		if (hwlock_id != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	__smem = smem;

	return 0;
}

static int qcom_smem_remove(struct platform_device *pdev)
{
	hwspin_lock_free(__smem->hwlock);
	__smem = NULL;

	return 0;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};
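/*
 * A sketch of a matching devicetree node, with the referenced node labels
 * (smem_region, rpm_msg_ram, tcsr_mutex) as illustrative placeholders; the
 * "qcom,rpm-msg-ram" phandle is optional and adds the second memory region,
 * and the hwlock index matches the remote spinlock described at the top of
 * this file:
 *
 *	smem {
 *		compatible = "qcom,smem";
 *		memory-region = <&smem_region>;
 *		qcom,rpm-msg-ram = <&rpm_msg_ram>;
 *		hwlocks = <&tcsr_mutex 3>;
 *	};
 */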

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit)

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");