Lines Matching +full:part +full:- +full:number

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
44 * two regions are cached and non-cached memory respectively. Each region
48 * Items in the non-cached region are allocated from the start of the partition
50 * is hence the region between the cached and non-cached offsets. The header of
59 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
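Everything below that touches a partition does so under that remote spinlock. A minimal sketch of the pattern, using the generic hwspinlock API from <linux/hwspinlock.h>; the lock handle and the 1000 ms timeout here are placeholders, not the driver's actual values:

	unsigned long flags;
	int ret;

	/* Take the TCSR/SFPB hardware spinlock before touching SMEM state. */
	ret = hwspin_lock_timeout_irqsave(hwlock, 1000, &flags);
	if (ret)
		return ret;

	/* ... read or update partition headers and items here ... */

	hwspin_unlock_irqrestore(hwlock, &flags);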
79 /* Highest accepted item number, for both global and private heaps */
88 /* Max number of processors/hosts in a system */
92 * struct smem_proc_comm - proc_comm communication struct (legacy)
104 * struct smem_global_entry - entry to reference smem items on the heap
120 * struct smem_header - header found in beginning of primary smem region
125 * @available: number of bytes available for allocation
140 * struct smem_ptable_entry - one entry in the @smem_ptable list
160 * struct smem_ptable - partition table for the private partitions
161 * @magic: magic number, must be SMEM_PTABLE_MAGIC
163 * @num_entries: number of partitions in the table
178 * struct smem_partition_header - header of the partitions
179 * @magic: magic number, must be SMEM_PART_MAGIC
200 * struct smem_partition - describes smem partition
216 * struct smem_private_entry - header of each item in the private partition
217 * @canary: magic number, must be SMEM_PRIVATE_CANARY
218 * @item: identifying number of the smem item
220 * @padding_data: number of bytes of padding of data
221 * @padding_hdr: number of bytes of padding between the header and the data
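Taken together, those field descriptions imply a per-item header along the following lines; this is a reconstruction from the accessors further down, so the exact field types and the trailing reserved word are assumptions:

	struct smem_private_entry {
		u16	canary;		/* must be SMEM_PRIVATE_CANARY */
		__le16	item;		/* smem item number */
		__le32	size;		/* data size, including padding_data bytes */
		__le16	padding_data;	/* padding appended after the data */
		__le16	padding_hdr;	/* padding between header and data */
		__le32	reserved;
	};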
235 * struct smem_info - smem region info located after the table of contents
236 * @magic: magic number, must be SMEM_INFO_MAGIC
240 * @num_items: highest accepted item number
253 * struct smem_region - representation of a chunk of memory used for smem
265 * struct qcom_smem - device data for the smem device
271 * @item_count: max accepted item number
273 * @num_regions: number of @regions
296 return p + le32_to_cpu(phdr->offset_free_uncached); in phdr_to_last_uncached_entry()
306 return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline); in phdr_to_first_cached_entry()
314 return p + le32_to_cpu(phdr->offset_free_cached); in phdr_to_last_cached_entry()
330 return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) + in uncached_entry_next()
331 le32_to_cpu(e->size); in uncached_entry_next()
339 return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline); in cached_entry_next()
346 return p + sizeof(*e) + le16_to_cpu(e->padding_hdr); in uncached_entry_to_item()
353 return p - le32_to_cpu(e->size); in cached_entry_to_item()
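The accessor arithmetic above corresponds to this layout, with uncached items growing upward from the start of the partition and cached items growing downward from the end:

	/*
	 * Uncached item, low to high addresses:
	 *   [entry header][padding_hdr bytes][size bytes of data, incl. padding_data]
	 *   data       = entry + sizeof(*entry) + padding_hdr
	 *   next entry = entry + sizeof(*entry) + padding_hdr + size
	 *
	 * Cached item, allocated back-to-front:
	 *   data       = entry - size
	 *   next entry = entry - size - ALIGN(sizeof(*entry), cacheline)
	 */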
366 * qcom_smem_bust_hwspin_lock_by_host() - bust the smem hwspinlock for a host
382 return -EINVAL; in qcom_smem_bust_hwspin_lock_by_host()
384 return hwspin_lock_bust(__smem->hwlock, SMEM_HOST_ID_TO_HWSPINLOCK_ID(host)); in qcom_smem_bust_hwspin_lock_by_host()
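A hedged caller sketch for the bust helper: a remoteproc stop/crash path could use it to recover the lock a dead remote was still holding. The host id variable and the surrounding error handling are illustrative only:

	/* Force-release the SMEM hwspinlock still owned by a crashed remote. */
	ret = qcom_smem_bust_hwspin_lock_by_host(remote_host_id);
	if (ret)
		dev_warn(dev, "failed to bust SMEM hwspinlock: %d\n", ret);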
389 * qcom_smem_is_available() - Check if SMEM is available
400 struct smem_partition *part, in qcom_smem_alloc_private() argument
410 phdr = (struct smem_partition_header __force *)part->virt_base; in qcom_smem_alloc_private()
411 p_end = (void *)phdr + part->size; in qcom_smem_alloc_private()
418 return -EINVAL; in qcom_smem_alloc_private()
421 if (hdr->canary != SMEM_PRIVATE_CANARY) in qcom_smem_alloc_private()
423 if (le16_to_cpu(hdr->item) == item) in qcom_smem_alloc_private()
424 return -EEXIST; in qcom_smem_alloc_private()
430 return -EINVAL; in qcom_smem_alloc_private()
435 dev_err(smem->dev, "Out of memory\n"); in qcom_smem_alloc_private()
436 return -ENOSPC; in qcom_smem_alloc_private()
439 hdr->canary = SMEM_PRIVATE_CANARY; in qcom_smem_alloc_private()
440 hdr->item = cpu_to_le16(item); in qcom_smem_alloc_private()
441 hdr->size = cpu_to_le32(ALIGN(size, 8)); in qcom_smem_alloc_private()
442 hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size); in qcom_smem_alloc_private()
443 hdr->padding_hdr = 0; in qcom_smem_alloc_private()
451 le32_add_cpu(&phdr->offset_free_uncached, alloc_size); in qcom_smem_alloc_private()
455 dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n", in qcom_smem_alloc_private()
456 le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1)); in qcom_smem_alloc_private()
458 return -EINVAL; in qcom_smem_alloc_private()
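A worked example of the padding arithmetic in the private allocation path above, assuming a request of 13 bytes:

	/*
	 * size = 13:
	 *   hdr->size         = ALIGN(13, 8) = 16
	 *   hdr->padding_data = 16 - 13      = 3
	 *   hdr->padding_hdr  = 0
	 *   offset_free_uncached then advances by sizeof(*hdr) + 16
	 */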
468 header = smem->regions[0].virt_base; in qcom_smem_alloc_global()
469 entry = &header->toc[item]; in qcom_smem_alloc_global()
470 if (entry->allocated) in qcom_smem_alloc_global()
471 return -EEXIST; in qcom_smem_alloc_global()
474 if (WARN_ON(size > le32_to_cpu(header->available))) in qcom_smem_alloc_global()
475 return -ENOMEM; in qcom_smem_alloc_global()
477 entry->offset = header->free_offset; in qcom_smem_alloc_global()
478 entry->size = cpu_to_le32(size); in qcom_smem_alloc_global()
486 entry->allocated = cpu_to_le32(1); in qcom_smem_alloc_global()
488 le32_add_cpu(&header->free_offset, size); in qcom_smem_alloc_global()
489 le32_add_cpu(&header->available, -size); in qcom_smem_alloc_global()
495 * qcom_smem_alloc() - allocate space for a smem item
496 * @host: remote processor id, or -1
498 * @size: number of bytes to be allocated
505 struct smem_partition *part; in qcom_smem_alloc() local
510 return -EPROBE_DEFER; in qcom_smem_alloc()
513 dev_err(__smem->dev, in qcom_smem_alloc()
515 return -EINVAL; in qcom_smem_alloc()
518 if (WARN_ON(item >= __smem->item_count)) in qcom_smem_alloc()
519 return -EINVAL; in qcom_smem_alloc()
521 ret = hwspin_lock_timeout_irqsave(__smem->hwlock, in qcom_smem_alloc()
527 if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { in qcom_smem_alloc()
528 part = &__smem->partitions[host]; in qcom_smem_alloc()
529 ret = qcom_smem_alloc_private(__smem, part, item, size); in qcom_smem_alloc()
530 } else if (__smem->global_partition.virt_base) { in qcom_smem_alloc()
531 part = &__smem->global_partition; in qcom_smem_alloc()
532 ret = qcom_smem_alloc_private(__smem, part, item, size); in qcom_smem_alloc()
537 hwspin_unlock_irqrestore(__smem->hwlock, &flags); in qcom_smem_alloc()
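A minimal caller sketch for qcom_smem_alloc(); MY_SMEM_ITEM and the 32-byte size are placeholders, and -EEXIST simply means the item was already reserved by an earlier boot stage or the remote:

	#include <linux/soc/qcom/smem.h>

	ret = qcom_smem_alloc(remote, MY_SMEM_ITEM, 32);
	if (ret < 0 && ret != -EEXIST)
		return ret;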
555 header = smem->regions[0].virt_base; in qcom_smem_get_global()
556 entry = &header->toc[item]; in qcom_smem_get_global()
557 if (!entry->allocated) in qcom_smem_get_global()
558 return ERR_PTR(-ENXIO); in qcom_smem_get_global()
560 aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK; in qcom_smem_get_global()
562 for (i = 0; i < smem->num_regions; i++) { in qcom_smem_get_global()
563 region = &smem->regions[i]; in qcom_smem_get_global()
565 if ((u32)region->aux_base == aux_base || !aux_base) { in qcom_smem_get_global()
566 e_size = le32_to_cpu(entry->size); in qcom_smem_get_global()
567 entry_offset = le32_to_cpu(entry->offset); in qcom_smem_get_global()
569 if (WARN_ON(e_size + entry_offset > region->size)) in qcom_smem_get_global()
570 return ERR_PTR(-EINVAL); in qcom_smem_get_global()
575 return region->virt_base + entry_offset; in qcom_smem_get_global()
579 return ERR_PTR(-ENOENT); in qcom_smem_get_global()
583 struct smem_partition *part, in qcom_smem_get_private() argument
593 phdr = (struct smem_partition_header __force *)part->virt_base; in qcom_smem_get_private()
594 p_end = (void *)phdr + part->size; in qcom_smem_get_private()
600 if (e->canary != SMEM_PRIVATE_CANARY) in qcom_smem_get_private()
603 if (le16_to_cpu(e->item) == item) { in qcom_smem_get_private()
605 e_size = le32_to_cpu(e->size); in qcom_smem_get_private()
606 padding_data = le16_to_cpu(e->padding_data); in qcom_smem_get_private()
608 if (WARN_ON(e_size > part->size || padding_data > e_size)) in qcom_smem_get_private()
609 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
611 *size = e_size - padding_data; in qcom_smem_get_private()
616 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
625 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
629 e = phdr_to_first_cached_entry(phdr, part->cacheline); in qcom_smem_get_private()
633 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
636 if (e->canary != SMEM_PRIVATE_CANARY) in qcom_smem_get_private()
639 if (le16_to_cpu(e->item) == item) { in qcom_smem_get_private()
641 e_size = le32_to_cpu(e->size); in qcom_smem_get_private()
642 padding_data = le16_to_cpu(e->padding_data); in qcom_smem_get_private()
644 if (WARN_ON(e_size > part->size || padding_data > e_size)) in qcom_smem_get_private()
645 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
647 *size = e_size - padding_data; in qcom_smem_get_private()
652 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
657 e = cached_entry_next(e, part->cacheline); in qcom_smem_get_private()
661 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
663 return ERR_PTR(-ENOENT); in qcom_smem_get_private()
666 dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n", in qcom_smem_get_private()
667 le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1)); in qcom_smem_get_private()
669 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
673 * qcom_smem_get() - resolve pointer and size of a smem item in qcom_smem_get()

674 * @host: the remote processor, or -1
683 struct smem_partition *part; in qcom_smem_get() local
686 void *ptr = ERR_PTR(-EPROBE_DEFER); in qcom_smem_get()
691 if (WARN_ON(item >= __smem->item_count)) in qcom_smem_get()
692 return ERR_PTR(-EINVAL); in qcom_smem_get()
694 ret = hwspin_lock_timeout_irqsave(__smem->hwlock, in qcom_smem_get()
700 if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { in qcom_smem_get()
701 part = &__smem->partitions[host]; in qcom_smem_get()
702 ptr = qcom_smem_get_private(__smem, part, item, size); in qcom_smem_get()
703 } else if (__smem->global_partition.virt_base) { in qcom_smem_get()
704 part = &__smem->global_partition; in qcom_smem_get()
705 ptr = qcom_smem_get_private(__smem, part, item, size); in qcom_smem_get()
710 hwspin_unlock_irqrestore(__smem->hwlock, &flags); in qcom_smem_get()
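The matching consumer side; QCOM_SMEM_HOST_ANY (-1) selects the global heap, and MY_SMEM_ITEM is again a placeholder item number:

	#include <linux/soc/qcom/smem.h>

	size_t size;
	void *ptr;

	ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, MY_SMEM_ITEM, &size);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	/* 'size' now holds the item length without trailing padding. */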
718 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
719 * @host: the remote processor identifying a partition, or -1
726 struct smem_partition *part; in qcom_smem_get_free_space() local
732 return -EPROBE_DEFER; in qcom_smem_get_free_space()
734 if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { in qcom_smem_get_free_space()
735 part = &__smem->partitions[host]; in qcom_smem_get_free_space()
736 phdr = part->virt_base; in qcom_smem_get_free_space()
737 ret = le32_to_cpu(phdr->offset_free_cached) - in qcom_smem_get_free_space()
738 le32_to_cpu(phdr->offset_free_uncached); in qcom_smem_get_free_space()
740 if (ret > le32_to_cpu(part->size)) in qcom_smem_get_free_space()
741 return -EINVAL; in qcom_smem_get_free_space()
742 } else if (__smem->global_partition.virt_base) { in qcom_smem_get_free_space()
743 part = &__smem->global_partition; in qcom_smem_get_free_space()
744 phdr = part->virt_base; in qcom_smem_get_free_space()
745 ret = le32_to_cpu(phdr->offset_free_cached) - in qcom_smem_get_free_space()
746 le32_to_cpu(phdr->offset_free_uncached); in qcom_smem_get_free_space()
748 if (ret > le32_to_cpu(part->size)) in qcom_smem_get_free_space()
749 return -EINVAL; in qcom_smem_get_free_space()
751 header = __smem->regions[0].virt_base; in qcom_smem_get_free_space()
752 ret = le32_to_cpu(header->available); in qcom_smem_get_free_space()
754 if (ret > __smem->regions[0].size) in qcom_smem_get_free_space()
755 return -EINVAL; in qcom_smem_get_free_space()
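A hedged usage sketch; 'remote' is whichever host id names the partition of interest, and the dev_dbg() call is illustrative:

	ret = qcom_smem_get_free_space(remote);
	if (ret < 0)
		return ret;
	dev_dbg(dev, "%d bytes of SMEM free\n", ret);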
768 * qcom_smem_virt_to_phys() - return the physical address associated
776 struct smem_partition *part; in qcom_smem_virt_to_phys() local
782 part = &__smem->partitions[i]; in qcom_smem_virt_to_phys()
784 if (addr_in_range(part->virt_base, part->size, p)) { in qcom_smem_virt_to_phys()
785 offset = p - part->virt_base; in qcom_smem_virt_to_phys()
787 return (phys_addr_t)part->phys_base + offset; in qcom_smem_virt_to_phys()
791 part = &__smem->global_partition; in qcom_smem_virt_to_phys()
793 if (addr_in_range(part->virt_base, part->size, p)) { in qcom_smem_virt_to_phys()
794 offset = p - part->virt_base; in qcom_smem_virt_to_phys()
796 return (phys_addr_t)part->phys_base + offset; in qcom_smem_virt_to_phys()
799 for (i = 0; i < __smem->num_regions; i++) { in qcom_smem_virt_to_phys()
800 area = &__smem->regions[i]; in qcom_smem_virt_to_phys()
802 if (addr_in_range(area->virt_base, area->size, p)) { in qcom_smem_virt_to_phys()
803 offset = p - area->virt_base; in qcom_smem_virt_to_phys()
805 return (phys_addr_t)area->aux_base + offset; in qcom_smem_virt_to_phys()
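A short sketch of the translation helper, for example to hand an SMEM buffer address to a remote processor; 'ptr' is assumed to come from qcom_smem_get():

	phys_addr_t phys;

	phys = qcom_smem_virt_to_phys(ptr);
	if (!phys)
		return -EINVAL;	/* pointer was not inside any SMEM region */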
814 * qcom_smem_get_soc_id() - return the SoC ID
829 *id = __le32_to_cpu(info->id); in qcom_smem_get_soc_id()
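And the SoC-id accessor, as a caller might use it:

	u32 soc_id;

	ret = qcom_smem_get_soc_id(&soc_id);
	if (ret)
		return ret;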
840 header = smem->regions[0].virt_base; in qcom_smem_get_sbl_version()
841 versions = header->version; in qcom_smem_get_sbl_version()
851 ptable = smem->ptable; in qcom_smem_get_ptable()
852 if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic))) in qcom_smem_get_ptable()
853 return ERR_PTR(-ENOENT); in qcom_smem_get_ptable()
855 version = le32_to_cpu(ptable->version); in qcom_smem_get_ptable()
857 dev_err(smem->dev, in qcom_smem_get_ptable()
859 return ERR_PTR(-EINVAL); in qcom_smem_get_ptable()
873 info = (struct smem_info *)&ptable->entry[ptable->num_entries]; in qcom_smem_get_item_count()
874 if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic))) in qcom_smem_get_item_count()
877 return le16_to_cpu(info->num_items); in qcom_smem_get_item_count()
893 phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset); in qcom_smem_partition_header()
894 header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size)); in qcom_smem_partition_header()
899 if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) { in qcom_smem_partition_header()
900 dev_err(smem->dev, "bad partition magic %4ph\n", header->magic); in qcom_smem_partition_header()
904 if (host0 != le16_to_cpu(header->host0)) { in qcom_smem_partition_header()
905 dev_err(smem->dev, "bad host0 (%hu != %hu)\n", in qcom_smem_partition_header()
906 host0, le16_to_cpu(header->host0)); in qcom_smem_partition_header()
909 if (host1 != le16_to_cpu(header->host1)) { in qcom_smem_partition_header()
910 dev_err(smem->dev, "bad host1 (%hu != %hu)\n", in qcom_smem_partition_header()
911 host1, le16_to_cpu(header->host1)); in qcom_smem_partition_header()
915 size = le32_to_cpu(header->size); in qcom_smem_partition_header()
916 if (size != le32_to_cpu(entry->size)) { in qcom_smem_partition_header()
917 dev_err(smem->dev, "bad partition size (%u != %u)\n", in qcom_smem_partition_header()
918 size, le32_to_cpu(entry->size)); in qcom_smem_partition_header()
922 if (le32_to_cpu(header->offset_free_uncached) > size) { in qcom_smem_partition_header()
923 dev_err(smem->dev, "bad partition free uncached (%u > %u)\n", in qcom_smem_partition_header()
924 le32_to_cpu(header->offset_free_uncached), size); in qcom_smem_partition_header()
939 if (smem->global_partition.virt_base) { in qcom_smem_set_global_partition()
940 dev_err(smem->dev, "Already found the global partition\n"); in qcom_smem_set_global_partition()
941 return -EINVAL; in qcom_smem_set_global_partition()
948 for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) { in qcom_smem_set_global_partition()
949 entry = &ptable->entry[i]; in qcom_smem_set_global_partition()
950 if (!le32_to_cpu(entry->offset)) in qcom_smem_set_global_partition()
952 if (!le32_to_cpu(entry->size)) in qcom_smem_set_global_partition()
955 if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST) in qcom_smem_set_global_partition()
958 if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) { in qcom_smem_set_global_partition()
965 dev_err(smem->dev, "Missing entry for global partition\n"); in qcom_smem_set_global_partition()
966 return -EINVAL; in qcom_smem_set_global_partition()
972 return -EINVAL; in qcom_smem_set_global_partition()
974 smem->global_partition.virt_base = (void __iomem *)header; in qcom_smem_set_global_partition()
975 smem->global_partition.phys_base = smem->regions[0].aux_base + in qcom_smem_set_global_partition()
976 le32_to_cpu(entry->offset); in qcom_smem_set_global_partition()
977 smem->global_partition.size = le32_to_cpu(entry->size); in qcom_smem_set_global_partition()
978 smem->global_partition.cacheline = le32_to_cpu(entry->cacheline); in qcom_smem_set_global_partition()
997 for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) { in qcom_smem_enumerate_partitions()
998 entry = &ptable->entry[i]; in qcom_smem_enumerate_partitions()
999 if (!le32_to_cpu(entry->offset)) in qcom_smem_enumerate_partitions()
1001 if (!le32_to_cpu(entry->size)) in qcom_smem_enumerate_partitions()
1004 host0 = le16_to_cpu(entry->host0); in qcom_smem_enumerate_partitions()
1005 host1 = le16_to_cpu(entry->host1); in qcom_smem_enumerate_partitions()
1014 dev_err(smem->dev, "bad host %u\n", remote_host); in qcom_smem_enumerate_partitions()
1015 return -EINVAL; in qcom_smem_enumerate_partitions()
1018 if (smem->partitions[remote_host].virt_base) { in qcom_smem_enumerate_partitions()
1019 dev_err(smem->dev, "duplicate host %u\n", remote_host); in qcom_smem_enumerate_partitions()
1020 return -EINVAL; in qcom_smem_enumerate_partitions()
1025 return -EINVAL; in qcom_smem_enumerate_partitions()
1027 smem->partitions[remote_host].virt_base = (void __iomem *)header; in qcom_smem_enumerate_partitions()
1028 smem->partitions[remote_host].phys_base = smem->regions[0].aux_base + in qcom_smem_enumerate_partitions()
1029 le32_to_cpu(entry->offset); in qcom_smem_enumerate_partitions()
1030 smem->partitions[remote_host].size = le32_to_cpu(entry->size); in qcom_smem_enumerate_partitions()
1031 smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline); in qcom_smem_enumerate_partitions()
1042 region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K); in qcom_smem_map_toc()
1043 ptable_start = region->aux_base + region->size - SZ_4K; in qcom_smem_map_toc()
1045 smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K); in qcom_smem_map_toc()
1047 if (!region->virt_base || !smem->ptable) in qcom_smem_map_toc()
1048 return -ENOMEM; in qcom_smem_map_toc()
1057 phys_addr = smem->regions[0].aux_base; in qcom_smem_map_global()
1059 smem->regions[0].size = size; in qcom_smem_map_global()
1060 smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size); in qcom_smem_map_global()
1062 if (!smem->regions[0].virt_base) in qcom_smem_map_global()
1063 return -ENOMEM; in qcom_smem_map_global()
1071 struct device *dev = smem->dev; in qcom_smem_resolve_mem()
1076 np = of_parse_phandle(dev->of_node, name, 0); in qcom_smem_resolve_mem()
1079 return -EINVAL; in qcom_smem_resolve_mem()
1087 region->aux_base = r.start; in qcom_smem_resolve_mem()
1088 region->size = resource_size(&r); in qcom_smem_resolve_mem()
1107 if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram")) in qcom_smem_probe()
1110 smem = devm_kzalloc(&pdev->dev, struct_size(smem, regions, num_regions), in qcom_smem_probe()
1113 return -ENOMEM; in qcom_smem_probe()
1115 smem->dev = &pdev->dev; in qcom_smem_probe()
1116 smem->num_regions = num_regions; in qcom_smem_probe()
1118 rmem = of_reserved_mem_lookup(pdev->dev.of_node); in qcom_smem_probe()
1120 smem->regions[0].aux_base = rmem->base; in qcom_smem_probe()
1121 smem->regions[0].size = rmem->size; in qcom_smem_probe()
1124 * Fall back to the memory-region reference, if we're not a in qcom_smem_probe()
1125 * reserved-memory node. in qcom_smem_probe()
1127 ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]); in qcom_smem_probe()
1133 ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]); in qcom_smem_probe()
1139 ret = qcom_smem_map_toc(smem, &smem->regions[0]); in qcom_smem_probe()
1144 smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev, in qcom_smem_probe()
1145 smem->regions[i].aux_base, in qcom_smem_probe()
1146 smem->regions[i].size); in qcom_smem_probe()
1147 if (!smem->regions[i].virt_base) { in qcom_smem_probe()
1148 dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base); in qcom_smem_probe()
1149 return -ENOMEM; in qcom_smem_probe()
1153 header = smem->regions[0].virt_base; in qcom_smem_probe()
1154 if (le32_to_cpu(header->initialized) != 1 || in qcom_smem_probe()
1155 le32_to_cpu(header->reserved)) { in qcom_smem_probe()
1156 dev_err(&pdev->dev, "SMEM is not initialized by SBL\n"); in qcom_smem_probe()
1157 return -EINVAL; in qcom_smem_probe()
1160 hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0); in qcom_smem_probe()
1162 if (hwlock_id != -EPROBE_DEFER) in qcom_smem_probe()
1163 dev_err(&pdev->dev, "failed to retrieve hwlock\n"); in qcom_smem_probe()
1167 smem->hwlock = hwspin_lock_request_specific(hwlock_id); in qcom_smem_probe()
1168 if (!smem->hwlock) in qcom_smem_probe()
1169 return -ENXIO; in qcom_smem_probe()
1171 ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags); in qcom_smem_probe()
1174 size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset); in qcom_smem_probe()
1175 hwspin_unlock_irqrestore(smem->hwlock, &flags); in qcom_smem_probe()
1183 devm_iounmap(smem->dev, smem->regions[0].virt_base); in qcom_smem_probe()
1189 smem->item_count = qcom_smem_get_item_count(smem); in qcom_smem_probe()
1193 smem->item_count = SMEM_ITEM_COUNT; in qcom_smem_probe()
1196 dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version); in qcom_smem_probe()
1197 return -EINVAL; in qcom_smem_probe()
1202 if (ret < 0 && ret != -ENOENT) in qcom_smem_probe()
1207 smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo", in qcom_smem_probe()
1210 if (IS_ERR(smem->socinfo)) in qcom_smem_probe()
1211 dev_dbg(&pdev->dev, "failed to register socinfo device\n"); in qcom_smem_probe()
1218 platform_device_unregister(__smem->socinfo); in qcom_smem_remove()
1220 hwspin_lock_free(__smem->hwlock); in qcom_smem_remove()
1236 .name = "qcom-smem",