197fb5e8dSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
24b638df4SBjorn Andersson /*
34b638df4SBjorn Andersson * Copyright (c) 2015, Sony Mobile Communications AB.
44b638df4SBjorn Andersson * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
54b638df4SBjorn Andersson */
64b638df4SBjorn Andersson
74b638df4SBjorn Andersson #include <linux/hwspinlock.h>
84b638df4SBjorn Andersson #include <linux/io.h>
94b638df4SBjorn Andersson #include <linux/module.h>
104b638df4SBjorn Andersson #include <linux/of.h>
114b638df4SBjorn Andersson #include <linux/of_address.h>
12b5af64fcSBjorn Andersson #include <linux/of_reserved_mem.h>
134b638df4SBjorn Andersson #include <linux/platform_device.h>
149487e2abSNiklas Cassel #include <linux/sizes.h>
154b638df4SBjorn Andersson #include <linux/slab.h>
164b638df4SBjorn Andersson #include <linux/soc/qcom/smem.h>
1717051d2cSRobert Marko #include <linux/soc/qcom/socinfo.h>
184b638df4SBjorn Andersson
194b638df4SBjorn Andersson /*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
234b638df4SBjorn Andersson *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
274b638df4SBjorn Andersson *
284b638df4SBjorn Andersson * The global header contains meta data for allocations as well as a fixed list
294b638df4SBjorn Andersson * of 512 entries (@smem_global_entry) that can be initialized to reference
304b638df4SBjorn Andersson * parts of the shared memory space.
314b638df4SBjorn Andersson *
324b638df4SBjorn Andersson *
334b638df4SBjorn Andersson * In addition to this global heap a set of "private" heaps can be set up at
344b638df4SBjorn Andersson * boot time with access restrictions so that only certain processor pairs can
354b638df4SBjorn Andersson * access the data.
364b638df4SBjorn Andersson *
374b638df4SBjorn Andersson * These partitions are referenced from an optional partition table
384b638df4SBjorn Andersson * (@smem_ptable), that is found 4kB from the end of the main smem region. The
394b638df4SBjorn Andersson * partition table entries (@smem_ptable_entry) lists the involved processors
404b638df4SBjorn Andersson * (or hosts) and their location in the main shared memory region.
414b638df4SBjorn Andersson *
424b638df4SBjorn Andersson * Each partition starts with a header (@smem_partition_header) that identifies
434b638df4SBjorn Andersson * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
474b638df4SBjorn Andersson *
484b638df4SBjorn Andersson * Items in the non-cached region are allocated from the start of the partition
494b638df4SBjorn Andersson * while items in the cached region are allocated from the end. The free area
50c7c1dc35SBjorn Andersson * is hence the region between the cached and non-cached offsets. The header of
51c7c1dc35SBjorn Andersson * cached items comes after the data.
524b638df4SBjorn Andersson *
53d52e4048SChris Lew * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
54d52e4048SChris Lew * for the global heap. A new global partition is created from the global heap
55d52e4048SChris Lew * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
56d52e4048SChris Lew * set by the bootloader.
574b638df4SBjorn Andersson *
584b638df4SBjorn Andersson * To synchronize allocations in the shared memory heaps a remote spinlock must
594b638df4SBjorn Andersson * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
604b638df4SBjorn Andersson * platforms.
614b638df4SBjorn Andersson *
624b638df4SBjorn Andersson */
634b638df4SBjorn Andersson
644b638df4SBjorn Andersson /*
65dcc0967dSChris Lew * The version member of the smem header contains an array of versions for the
66dcc0967dSChris Lew * various software components in the SoC. We verify that the boot loader
67dcc0967dSChris Lew * version is a valid version as a sanity check.
684b638df4SBjorn Andersson */
694b638df4SBjorn Andersson #define SMEM_MASTER_SBL_VERSION_INDEX 7
70d52e4048SChris Lew #define SMEM_GLOBAL_HEAP_VERSION 11
71d52e4048SChris Lew #define SMEM_GLOBAL_PART_VERSION 12
724b638df4SBjorn Andersson
734b638df4SBjorn Andersson /*
744b638df4SBjorn Andersson * The first 8 items are only to be allocated by the boot loader while
754b638df4SBjorn Andersson * initializing the heap.
764b638df4SBjorn Andersson */
774b638df4SBjorn Andersson #define SMEM_ITEM_LAST_FIXED 8
784b638df4SBjorn Andersson
794b638df4SBjorn Andersson /* Highest accepted item number, for both global and private heaps */
804b638df4SBjorn Andersson #define SMEM_ITEM_COUNT 512
814b638df4SBjorn Andersson
824b638df4SBjorn Andersson /* Processor/host identifier for the application processor */
834b638df4SBjorn Andersson #define SMEM_HOST_APPS 0
844b638df4SBjorn Andersson
85d52e4048SChris Lew /* Processor/host identifier for the global partition */
86d52e4048SChris Lew #define SMEM_GLOBAL_HOST 0xfffe
87d52e4048SChris Lew
884b638df4SBjorn Andersson /* Max number of processors/hosts in a system */
89b302c64aSBartosz Golaszewski #define SMEM_HOST_COUNT 20
904b638df4SBjorn Andersson
/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command: current command to be executed
 * @status: status of the currently requested command
 * @params: parameters to the command
 *
 * All fields are little-endian as laid out in shared memory.
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};
1024b638df4SBjorn Andersson
/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated: boolean to indicate if this entry is used
 * @offset: offset to the allocated space
 * @size: size of the allocated space, 8 byte aligned
 * @aux_base: base address for the memory region used by this unit, or 0 for
 *	the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
/* Strips the reserved bits 1:0 from @aux_base to recover the base address */
#define AUX_BASE_MASK		0xfffffffc
1184b638df4SBjorn Andersson
/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 *
 * Lives at offset 0 of the first smem region; all fields little-endian.
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};
1384b638df4SBjorn Andersson
/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset: offset, within the main shared memory region, of the partition
 * @size: size of the partition
 * @flags: flags for the partition (currently unused)
 * @host0: first processor/host with access to this partition
 * @host1: second processor/host with access to this partition
 * @cacheline: alignment for "cached" entries
 * @reserved: reserved entries for later use
 *
 * All fields are little-endian as laid out in shared memory.
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};
1584b638df4SBjorn Andersson
/**
 * struct smem_ptable - partition table for the private partitions
 * @magic: magic number, must be SMEM_PTABLE_MAGIC
 * @version: version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved: for now reserved entries
 * @entry: list of @smem_ptable_entry for the @num_entries partitions
 *
 * Found 4kB from the end of the main smem region (see file header comment).
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
1764b638df4SBjorn Andersson
/**
 * struct smem_partition_header - header of the partitions
 * @magic: magic number, must be SMEM_PART_MAGIC
 * @host0: first processor/host with access to this partition
 * @host1: second processor/host with access to this partition
 * @size: size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *	this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *	partition
 * @reserved: for now reserved entries
 *
 * Uncached entries grow up from this header, cached entries grow down from
 * the end of the partition; the gap between the two offsets is the free area.
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};
1989806884dSStephen Boyd
19920bb6c9dSDeepak Kumar Singh /**
20020bb6c9dSDeepak Kumar Singh * struct smem_partition - describes smem partition
20120bb6c9dSDeepak Kumar Singh * @virt_base: starting virtual address of partition
20220bb6c9dSDeepak Kumar Singh * @phys_base: starting physical address of partition
20320bb6c9dSDeepak Kumar Singh * @cacheline: alignment for "cached" entries
20420bb6c9dSDeepak Kumar Singh * @size: size of partition
20520bb6c9dSDeepak Kumar Singh */
20620bb6c9dSDeepak Kumar Singh struct smem_partition {
20720bb6c9dSDeepak Kumar Singh void __iomem *virt_base;
20820bb6c9dSDeepak Kumar Singh phys_addr_t phys_base;
20920bb6c9dSDeepak Kumar Singh size_t cacheline;
21020bb6c9dSDeepak Kumar Singh size_t size;
21120bb6c9dSDeepak Kumar Singh };
21220bb6c9dSDeepak Kumar Singh
2139806884dSStephen Boyd static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
2144b638df4SBjorn Andersson
/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary: magic number, must be SMEM_PRIVATE_CANARY
 * @item: identifying number of the smem item
 * @size: size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved: for now reserved entry
 *
 * For uncached items the header precedes the data; for cached items it
 * follows the data (see file header comment).
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5
2334b638df4SBjorn Andersson
2344b638df4SBjorn Andersson /**
2355b394067SChris Lew * struct smem_info - smem region info located after the table of contents
2365b394067SChris Lew * @magic: magic number, must be SMEM_INFO_MAGIC
2375b394067SChris Lew * @size: size of the smem region
2385b394067SChris Lew * @base_addr: base address of the smem region
2395b394067SChris Lew * @reserved: for now reserved entry
2405b394067SChris Lew * @num_items: highest accepted item number
2415b394067SChris Lew */
2425b394067SChris Lew struct smem_info {
2435b394067SChris Lew u8 magic[4];
2445b394067SChris Lew __le32 size;
2455b394067SChris Lew __le32 base_addr;
2465b394067SChris Lew __le32 reserved;
2475b394067SChris Lew __le16 num_items;
2485b394067SChris Lew };
2495b394067SChris Lew
2505b394067SChris Lew static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
2515b394067SChris Lew
/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base: identifier of aux_mem base
 * @virt_base: virtual base address of memory with this aux_mem identifier
 * @size: size of the memory region
 *
 * @aux_base is matched against the aux_base field of @smem_global_entry when
 * resolving items in the global heap.
 */
struct smem_region {
	phys_addr_t aux_base;
	void __iomem *virt_base;
	size_t size;
};
2634b638df4SBjorn Andersson
/**
 * struct qcom_smem - device data for the smem device
 * @dev: device pointer
 * @hwlock: reference to a hwspinlock
 * @ptable: virtual base of partition table
 * @global_partition: describes for global partition when in use
 * @partitions: list of partitions of current processor/host
 * @item_count: max accepted item number
 * @socinfo: platform device pointer
 * @num_regions: number of @regions
 * @regions: list of the memory regions defining the shared memory
 *
 * There is a single instance of this structure per system, reachable through
 * the file-scope @__smem pointer once the driver has probed.
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	u32 item_count;
	struct platform_device *socinfo;
	struct smem_ptable *ptable;
	struct smem_partition global_partition;
	struct smem_partition partitions[SMEM_HOST_COUNT];

	unsigned num_regions;
	struct smem_region regions[];
};
2904b638df4SBjorn Andersson
291e221a1daSAlex Elder static void *
phdr_to_last_uncached_entry(struct smem_partition_header * phdr)29201f14154SBjorn Andersson phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
2939806884dSStephen Boyd {
2949806884dSStephen Boyd void *p = phdr;
2959806884dSStephen Boyd
2969806884dSStephen Boyd return p + le32_to_cpu(phdr->offset_free_uncached);
2979806884dSStephen Boyd }
2989806884dSStephen Boyd
299e221a1daSAlex Elder static struct smem_private_entry *
phdr_to_first_cached_entry(struct smem_partition_header * phdr,size_t cacheline)300e221a1daSAlex Elder phdr_to_first_cached_entry(struct smem_partition_header *phdr,
301c7c1dc35SBjorn Andersson size_t cacheline)
302c7c1dc35SBjorn Andersson {
303c7c1dc35SBjorn Andersson void *p = phdr;
30470708749SAlex Elder struct smem_private_entry *e;
305c7c1dc35SBjorn Andersson
30670708749SAlex Elder return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
307c7c1dc35SBjorn Andersson }
308c7c1dc35SBjorn Andersson
309e221a1daSAlex Elder static void *
phdr_to_last_cached_entry(struct smem_partition_header * phdr)310e221a1daSAlex Elder phdr_to_last_cached_entry(struct smem_partition_header *phdr)
3119806884dSStephen Boyd {
3129806884dSStephen Boyd void *p = phdr;
3139806884dSStephen Boyd
3149806884dSStephen Boyd return p + le32_to_cpu(phdr->offset_free_cached);
3159806884dSStephen Boyd }
3169806884dSStephen Boyd
3179806884dSStephen Boyd static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header * phdr)31801f14154SBjorn Andersson phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
3199806884dSStephen Boyd {
3209806884dSStephen Boyd void *p = phdr;
3219806884dSStephen Boyd
3229806884dSStephen Boyd return p + sizeof(*phdr);
3239806884dSStephen Boyd }
3249806884dSStephen Boyd
3259806884dSStephen Boyd static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry * e)32601f14154SBjorn Andersson uncached_entry_next(struct smem_private_entry *e)
3279806884dSStephen Boyd {
3289806884dSStephen Boyd void *p = e;
3299806884dSStephen Boyd
3309806884dSStephen Boyd return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
3319806884dSStephen Boyd le32_to_cpu(e->size);
3329806884dSStephen Boyd }
3339806884dSStephen Boyd
334c7c1dc35SBjorn Andersson static struct smem_private_entry *
cached_entry_next(struct smem_private_entry * e,size_t cacheline)335c7c1dc35SBjorn Andersson cached_entry_next(struct smem_private_entry *e, size_t cacheline)
336c7c1dc35SBjorn Andersson {
337c7c1dc35SBjorn Andersson void *p = e;
338c7c1dc35SBjorn Andersson
339c7c1dc35SBjorn Andersson return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
340c7c1dc35SBjorn Andersson }
341c7c1dc35SBjorn Andersson
uncached_entry_to_item(struct smem_private_entry * e)34201f14154SBjorn Andersson static void *uncached_entry_to_item(struct smem_private_entry *e)
3439806884dSStephen Boyd {
3449806884dSStephen Boyd void *p = e;
3459806884dSStephen Boyd
3469806884dSStephen Boyd return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
3479806884dSStephen Boyd }
3489806884dSStephen Boyd
cached_entry_to_item(struct smem_private_entry * e)349c7c1dc35SBjorn Andersson static void *cached_entry_to_item(struct smem_private_entry *e)
350c7c1dc35SBjorn Andersson {
351c7c1dc35SBjorn Andersson void *p = e;
352c7c1dc35SBjorn Andersson
353c7c1dc35SBjorn Andersson return p - le32_to_cpu(e->size);
354c7c1dc35SBjorn Andersson }
355c7c1dc35SBjorn Andersson
3564b638df4SBjorn Andersson /* Pointer to the one and only smem handle */
3574b638df4SBjorn Andersson static struct qcom_smem *__smem;
3584b638df4SBjorn Andersson
3594b638df4SBjorn Andersson /* Timeout (ms) for the trylock of remote spinlocks */
3604b638df4SBjorn Andersson #define HWSPINLOCK_TIMEOUT 1000
3614b638df4SBjorn Andersson
362*220725deSChris Lew /* The qcom hwspinlock id is always plus one from the smem host id */
363*220725deSChris Lew #define SMEM_HOST_ID_TO_HWSPINLOCK_ID(__x) ((__x) + 1)
364*220725deSChris Lew
365*220725deSChris Lew /**
366*220725deSChris Lew * qcom_smem_bust_hwspin_lock_by_host() - bust the smem hwspinlock for a host
367*220725deSChris Lew * @host: remote processor id
368*220725deSChris Lew *
369*220725deSChris Lew * Busts the hwspin_lock for the given smem host id. This helper is intended
370*220725deSChris Lew * for remoteproc drivers that manage remoteprocs with an equivalent smem
371*220725deSChris Lew * driver instance in the remote firmware. Drivers can force a release of the
372*220725deSChris Lew * smem hwspin_lock if the rproc unexpectedly goes into a bad state.
373*220725deSChris Lew *
374*220725deSChris Lew * Context: Process context.
375*220725deSChris Lew *
376*220725deSChris Lew * Returns: 0 on success, otherwise negative errno.
377*220725deSChris Lew */
qcom_smem_bust_hwspin_lock_by_host(unsigned int host)378*220725deSChris Lew int qcom_smem_bust_hwspin_lock_by_host(unsigned int host)
379*220725deSChris Lew {
380*220725deSChris Lew /* This function is for remote procs, so ignore SMEM_HOST_APPS */
381*220725deSChris Lew if (host == SMEM_HOST_APPS || host >= SMEM_HOST_COUNT)
382*220725deSChris Lew return -EINVAL;
383*220725deSChris Lew
384*220725deSChris Lew return hwspin_lock_bust(__smem->hwlock, SMEM_HOST_ID_TO_HWSPINLOCK_ID(host));
385*220725deSChris Lew }
386*220725deSChris Lew EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host);
387*220725deSChris Lew
3884dbb9e23SStephan Gerhold /**
3894dbb9e23SStephan Gerhold * qcom_smem_is_available() - Check if SMEM is available
3904dbb9e23SStephan Gerhold *
3914dbb9e23SStephan Gerhold * Return: true if SMEM is available, false otherwise.
3924dbb9e23SStephan Gerhold */
qcom_smem_is_available(void)3934dbb9e23SStephan Gerhold bool qcom_smem_is_available(void)
3944dbb9e23SStephan Gerhold {
3954dbb9e23SStephan Gerhold return !!__smem;
3964dbb9e23SStephan Gerhold }
3974dbb9e23SStephan Gerhold EXPORT_SYMBOL(qcom_smem_is_available);
3984dbb9e23SStephan Gerhold
qcom_smem_alloc_private(struct qcom_smem * smem,struct smem_partition * part,unsigned item,size_t size)3994b638df4SBjorn Andersson static int qcom_smem_alloc_private(struct qcom_smem *smem,
40020bb6c9dSDeepak Kumar Singh struct smem_partition *part,
4014b638df4SBjorn Andersson unsigned item,
4024b638df4SBjorn Andersson size_t size)
4034b638df4SBjorn Andersson {
4049806884dSStephen Boyd struct smem_private_entry *hdr, *end;
40520bb6c9dSDeepak Kumar Singh struct smem_partition_header *phdr;
4064b638df4SBjorn Andersson size_t alloc_size;
4079806884dSStephen Boyd void *cached;
408f1383348SDeepak Kumar Singh void *p_end;
4094b638df4SBjorn Andersson
41020bb6c9dSDeepak Kumar Singh phdr = (struct smem_partition_header __force *)part->virt_base;
411f1383348SDeepak Kumar Singh p_end = (void *)phdr + part->size;
41220bb6c9dSDeepak Kumar Singh
41301f14154SBjorn Andersson hdr = phdr_to_first_uncached_entry(phdr);
41401f14154SBjorn Andersson end = phdr_to_last_uncached_entry(phdr);
41501f14154SBjorn Andersson cached = phdr_to_last_cached_entry(phdr);
4164b638df4SBjorn Andersson
417f1383348SDeepak Kumar Singh if (WARN_ON((void *)end > p_end || cached > p_end))
418f1383348SDeepak Kumar Singh return -EINVAL;
419f1383348SDeepak Kumar Singh
4209806884dSStephen Boyd while (hdr < end) {
42104a512feSAlex Elder if (hdr->canary != SMEM_PRIVATE_CANARY)
42204a512feSAlex Elder goto bad_canary;
4239806884dSStephen Boyd if (le16_to_cpu(hdr->item) == item)
4244b638df4SBjorn Andersson return -EEXIST;
4254b638df4SBjorn Andersson
42601f14154SBjorn Andersson hdr = uncached_entry_next(hdr);
4274b638df4SBjorn Andersson }
4284b638df4SBjorn Andersson
429f1383348SDeepak Kumar Singh if (WARN_ON((void *)hdr > p_end))
430f1383348SDeepak Kumar Singh return -EINVAL;
431f1383348SDeepak Kumar Singh
4324b638df4SBjorn Andersson /* Check that we don't grow into the cached region */
4334b638df4SBjorn Andersson alloc_size = sizeof(*hdr) + ALIGN(size, 8);
4348377f818SAlex Elder if ((void *)hdr + alloc_size > cached) {
4354b638df4SBjorn Andersson dev_err(smem->dev, "Out of memory\n");
4364b638df4SBjorn Andersson return -ENOSPC;
4374b638df4SBjorn Andersson }
4384b638df4SBjorn Andersson
4394b638df4SBjorn Andersson hdr->canary = SMEM_PRIVATE_CANARY;
4409806884dSStephen Boyd hdr->item = cpu_to_le16(item);
4419806884dSStephen Boyd hdr->size = cpu_to_le32(ALIGN(size, 8));
4429806884dSStephen Boyd hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
4434b638df4SBjorn Andersson hdr->padding_hdr = 0;
4444b638df4SBjorn Andersson
4454b638df4SBjorn Andersson /*
4464b638df4SBjorn Andersson * Ensure the header is written before we advance the free offset, so
4474b638df4SBjorn Andersson * that remote processors that does not take the remote spinlock still
4484b638df4SBjorn Andersson * gets a consistent view of the linked list.
4494b638df4SBjorn Andersson */
4504b638df4SBjorn Andersson wmb();
4519806884dSStephen Boyd le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
4524b638df4SBjorn Andersson
4534b638df4SBjorn Andersson return 0;
45404a512feSAlex Elder bad_canary:
45504a512feSAlex Elder dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
45604a512feSAlex Elder le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
45704a512feSAlex Elder
45804a512feSAlex Elder return -EINVAL;
4594b638df4SBjorn Andersson }
4604b638df4SBjorn Andersson
qcom_smem_alloc_global(struct qcom_smem * smem,unsigned item,size_t size)4614b638df4SBjorn Andersson static int qcom_smem_alloc_global(struct qcom_smem *smem,
4624b638df4SBjorn Andersson unsigned item,
4634b638df4SBjorn Andersson size_t size)
4644b638df4SBjorn Andersson {
4654b638df4SBjorn Andersson struct smem_global_entry *entry;
466d52e4048SChris Lew struct smem_header *header;
4674b638df4SBjorn Andersson
4684b638df4SBjorn Andersson header = smem->regions[0].virt_base;
4694b638df4SBjorn Andersson entry = &header->toc[item];
4704b638df4SBjorn Andersson if (entry->allocated)
4714b638df4SBjorn Andersson return -EEXIST;
4724b638df4SBjorn Andersson
4734b638df4SBjorn Andersson size = ALIGN(size, 8);
4749806884dSStephen Boyd if (WARN_ON(size > le32_to_cpu(header->available)))
4754b638df4SBjorn Andersson return -ENOMEM;
4764b638df4SBjorn Andersson
4774b638df4SBjorn Andersson entry->offset = header->free_offset;
4789806884dSStephen Boyd entry->size = cpu_to_le32(size);
4794b638df4SBjorn Andersson
4804b638df4SBjorn Andersson /*
4814b638df4SBjorn Andersson * Ensure the header is consistent before we mark the item allocated,
4824b638df4SBjorn Andersson * so that remote processors will get a consistent view of the item
4834b638df4SBjorn Andersson * even though they do not take the spinlock on read.
4844b638df4SBjorn Andersson */
4854b638df4SBjorn Andersson wmb();
4869806884dSStephen Boyd entry->allocated = cpu_to_le32(1);
4874b638df4SBjorn Andersson
4889806884dSStephen Boyd le32_add_cpu(&header->free_offset, size);
4899806884dSStephen Boyd le32_add_cpu(&header->available, -size);
4904b638df4SBjorn Andersson
4914b638df4SBjorn Andersson return 0;
4924b638df4SBjorn Andersson }
4934b638df4SBjorn Andersson
/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host: remote processor id, or -1
 * @item: smem item handle
 * @size: number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 *
 * Allocation goes to the private partition for @host if one exists, then the
 * global partition, and finally the legacy global heap. The remote spinlock
 * is held across the allocation.
 *
 * Return: 0 on success, -EPROBE_DEFER before the driver has probed, or a
 * negative errno from the underlying allocator.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_partition *part;
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	/* Items below SMEM_ITEM_LAST_FIXED are reserved for the boot loader */
	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, part, item, size);
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, part, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_smem_alloc);
5424b638df4SBjorn Andersson
qcom_smem_get_global(struct qcom_smem * smem,unsigned item,size_t * size)5431a03964dSStephen Boyd static void *qcom_smem_get_global(struct qcom_smem *smem,
5444b638df4SBjorn Andersson unsigned item,
5454b638df4SBjorn Andersson size_t *size)
5464b638df4SBjorn Andersson {
5474b638df4SBjorn Andersson struct smem_header *header;
5489f01b7a8SAlex Elder struct smem_region *region;
5494b638df4SBjorn Andersson struct smem_global_entry *entry;
550f1383348SDeepak Kumar Singh u64 entry_offset;
551f1383348SDeepak Kumar Singh u32 e_size;
5524b638df4SBjorn Andersson u32 aux_base;
5534b638df4SBjorn Andersson unsigned i;
5544b638df4SBjorn Andersson
5554b638df4SBjorn Andersson header = smem->regions[0].virt_base;
5564b638df4SBjorn Andersson entry = &header->toc[item];
5574b638df4SBjorn Andersson if (!entry->allocated)
5581a03964dSStephen Boyd return ERR_PTR(-ENXIO);
5594b638df4SBjorn Andersson
5609806884dSStephen Boyd aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
5614b638df4SBjorn Andersson
5624b638df4SBjorn Andersson for (i = 0; i < smem->num_regions; i++) {
5639f01b7a8SAlex Elder region = &smem->regions[i];
5644b638df4SBjorn Andersson
565b5af64fcSBjorn Andersson if ((u32)region->aux_base == aux_base || !aux_base) {
566f1383348SDeepak Kumar Singh e_size = le32_to_cpu(entry->size);
567f1383348SDeepak Kumar Singh entry_offset = le32_to_cpu(entry->offset);
568f1383348SDeepak Kumar Singh
569f1383348SDeepak Kumar Singh if (WARN_ON(e_size + entry_offset > region->size))
570f1383348SDeepak Kumar Singh return ERR_PTR(-EINVAL);
571f1383348SDeepak Kumar Singh
5724b638df4SBjorn Andersson if (size != NULL)
573f1383348SDeepak Kumar Singh *size = e_size;
574f1383348SDeepak Kumar Singh
575f1383348SDeepak Kumar Singh return region->virt_base + entry_offset;
5761a03964dSStephen Boyd }
5774b638df4SBjorn Andersson }
5784b638df4SBjorn Andersson
5791a03964dSStephen Boyd return ERR_PTR(-ENOENT);
5801a03964dSStephen Boyd }
5811a03964dSStephen Boyd
/*
 * qcom_smem_get_private() - look up an item in a private partition
 * @smem:	smem instance (used only for error reporting)
 * @part:	partition to search
 * @item:	item index to look up
 * @size:	if non-NULL, filled out with the item's payload size
 *
 * Walks the uncached entry list growing up from the partition header, then
 * the cached entry list growing down from the end of the partition. Each
 * entry's canary and bounds are validated before its payload is returned.
 * Returns the payload pointer or an ERR_PTR() on failure.
 */
static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition *part,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;
	struct smem_partition_header *phdr;
	void *item_ptr, *p_end;
	u32 padding_data;
	u32 e_size;

	phdr = (struct smem_partition_header __force *)part->virt_base;
	/* p_end marks the first byte past the partition */
	p_end = (void *)phdr + part->size;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	/* Uncached entries grow upwards from the partition header */
	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				/* Entry must fit in the partition; padding must fit in the entry */
				if (WARN_ON(e_size > part->size || padding_data > e_size))
					return ERR_PTR(-EINVAL);

				*size = e_size - padding_data;
			}

			item_ptr = uncached_entry_to_item(e);
			if (WARN_ON(item_ptr > p_end))
				return ERR_PTR(-EINVAL);

			return item_ptr;
		}

		e = uncached_entry_next(e);
	}

	/* A corrupt entry chain may have walked past the partition end */
	if (WARN_ON((void *)e > p_end))
		return ERR_PTR(-EINVAL);

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, part->cacheline);
	end = phdr_to_last_cached_entry(phdr);

	if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
		return ERR_PTR(-EINVAL);

	/* Cached entries grow downwards from the end of the partition */
	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				if (WARN_ON(e_size > part->size || padding_data > e_size))
					return ERR_PTR(-EINVAL);

				*size = e_size - padding_data;
			}

			item_ptr = cached_entry_to_item(e);
			if (WARN_ON(item_ptr < (void *)phdr))
				return ERR_PTR(-EINVAL);

			return item_ptr;
		}

		e = cached_entry_next(e, part->cacheline);
	}

	/* A corrupt chain may have walked below the partition header */
	if (WARN_ON((void *)e < (void *)phdr))
		return ERR_PTR(-EINVAL);

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
			le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return ERR_PTR(-EINVAL);
}
6714b638df4SBjorn Andersson
6724b638df4SBjorn Andersson /**
6734b638df4SBjorn Andersson * qcom_smem_get() - resolve pointer and size of a smem item
6744b638df4SBjorn Andersson * @host: the remote processor, or -1
6754b638df4SBjorn Andersson * @item: smem item handle
6764b638df4SBjorn Andersson * @size: pointer to be filled out with size of the item
6774b638df4SBjorn Andersson *
6781a03964dSStephen Boyd * Looks up smem item and returns pointer to it. Size of smem
6791a03964dSStephen Boyd * item is returned in @size.
6804b638df4SBjorn Andersson */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	struct smem_partition *part;
	unsigned long flags;
	int ret;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	/* Driver not probed yet */
	if (!__smem)
		return ptr;

	if (WARN_ON(item >= __smem->item_count))
		return ERR_PTR(-EINVAL);

	/* Serialize against remote processors via the hardware spinlock */
	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Prefer the host-specific partition, then the global partition,
	 * finally the legacy global heap.
	 */
	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		ptr = qcom_smem_get_private(__smem, part, item, size);
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		ptr = qcom_smem_get_private(__smem, part, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ptr;

}
EXPORT_SYMBOL_GPL(qcom_smem_get);
7164b638df4SBjorn Andersson
7174b638df4SBjorn Andersson /**
7184b638df4SBjorn Andersson * qcom_smem_get_free_space() - retrieve amount of free space in a partition
7194b638df4SBjorn Andersson * @host: the remote processor identifying a partition, or -1
7204b638df4SBjorn Andersson *
7214b638df4SBjorn Andersson * To be used by smem clients as a quick way to determine if any new
7224b638df4SBjorn Andersson * allocations have been made.
7234b638df4SBjorn Andersson */
qcom_smem_get_free_space(unsigned host)7244b638df4SBjorn Andersson int qcom_smem_get_free_space(unsigned host)
7254b638df4SBjorn Andersson {
72620bb6c9dSDeepak Kumar Singh struct smem_partition *part;
7274b638df4SBjorn Andersson struct smem_partition_header *phdr;
7284b638df4SBjorn Andersson struct smem_header *header;
7294b638df4SBjorn Andersson unsigned ret;
7304b638df4SBjorn Andersson
7314b638df4SBjorn Andersson if (!__smem)
7324b638df4SBjorn Andersson return -EPROBE_DEFER;
7334b638df4SBjorn Andersson
73420bb6c9dSDeepak Kumar Singh if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
73520bb6c9dSDeepak Kumar Singh part = &__smem->partitions[host];
73620bb6c9dSDeepak Kumar Singh phdr = part->virt_base;
7379806884dSStephen Boyd ret = le32_to_cpu(phdr->offset_free_cached) -
7389806884dSStephen Boyd le32_to_cpu(phdr->offset_free_uncached);
739f1383348SDeepak Kumar Singh
740f1383348SDeepak Kumar Singh if (ret > le32_to_cpu(part->size))
741f1383348SDeepak Kumar Singh return -EINVAL;
74220bb6c9dSDeepak Kumar Singh } else if (__smem->global_partition.virt_base) {
74320bb6c9dSDeepak Kumar Singh part = &__smem->global_partition;
74420bb6c9dSDeepak Kumar Singh phdr = part->virt_base;
745d52e4048SChris Lew ret = le32_to_cpu(phdr->offset_free_cached) -
746d52e4048SChris Lew le32_to_cpu(phdr->offset_free_uncached);
747f1383348SDeepak Kumar Singh
748f1383348SDeepak Kumar Singh if (ret > le32_to_cpu(part->size))
749f1383348SDeepak Kumar Singh return -EINVAL;
7504b638df4SBjorn Andersson } else {
7514b638df4SBjorn Andersson header = __smem->regions[0].virt_base;
7529806884dSStephen Boyd ret = le32_to_cpu(header->available);
753f1383348SDeepak Kumar Singh
754f1383348SDeepak Kumar Singh if (ret > __smem->regions[0].size)
755f1383348SDeepak Kumar Singh return -EINVAL;
7564b638df4SBjorn Andersson }
7574b638df4SBjorn Andersson
7584b638df4SBjorn Andersson return ret;
7594b638df4SBjorn Andersson }
76010615007SRobert Marko EXPORT_SYMBOL_GPL(qcom_smem_get_free_space);
7614b638df4SBjorn Andersson
addr_in_range(void __iomem * base,size_t size,void * addr)76220bb6c9dSDeepak Kumar Singh static bool addr_in_range(void __iomem *base, size_t size, void *addr)
76320bb6c9dSDeepak Kumar Singh {
7645f908786SChen Jiahao return base && ((void __iomem *)addr >= base && (void __iomem *)addr < base + size);
76520bb6c9dSDeepak Kumar Singh }
76620bb6c9dSDeepak Kumar Singh
7676d361c1dSAlex Elder /**
7686d361c1dSAlex Elder * qcom_smem_virt_to_phys() - return the physical address associated
7696d361c1dSAlex Elder * with an smem item pointer (previously returned by qcom_smem_get()
7706d361c1dSAlex Elder * @p: the virtual address to convert
7716d361c1dSAlex Elder *
7726d361c1dSAlex Elder * Returns 0 if the pointer provided is not within any smem region.
7736d361c1dSAlex Elder */
qcom_smem_virt_to_phys(void * p)7746d361c1dSAlex Elder phys_addr_t qcom_smem_virt_to_phys(void *p)
7756d361c1dSAlex Elder {
77620bb6c9dSDeepak Kumar Singh struct smem_partition *part;
77720bb6c9dSDeepak Kumar Singh struct smem_region *area;
77820bb6c9dSDeepak Kumar Singh u64 offset;
77920bb6c9dSDeepak Kumar Singh u32 i;
78020bb6c9dSDeepak Kumar Singh
78120bb6c9dSDeepak Kumar Singh for (i = 0; i < SMEM_HOST_COUNT; i++) {
78220bb6c9dSDeepak Kumar Singh part = &__smem->partitions[i];
78320bb6c9dSDeepak Kumar Singh
78420bb6c9dSDeepak Kumar Singh if (addr_in_range(part->virt_base, part->size, p)) {
78520bb6c9dSDeepak Kumar Singh offset = p - part->virt_base;
78620bb6c9dSDeepak Kumar Singh
78720bb6c9dSDeepak Kumar Singh return (phys_addr_t)part->phys_base + offset;
78820bb6c9dSDeepak Kumar Singh }
78920bb6c9dSDeepak Kumar Singh }
79020bb6c9dSDeepak Kumar Singh
79120bb6c9dSDeepak Kumar Singh part = &__smem->global_partition;
79220bb6c9dSDeepak Kumar Singh
79320bb6c9dSDeepak Kumar Singh if (addr_in_range(part->virt_base, part->size, p)) {
79420bb6c9dSDeepak Kumar Singh offset = p - part->virt_base;
79520bb6c9dSDeepak Kumar Singh
79620bb6c9dSDeepak Kumar Singh return (phys_addr_t)part->phys_base + offset;
79720bb6c9dSDeepak Kumar Singh }
7986d361c1dSAlex Elder
7996d361c1dSAlex Elder for (i = 0; i < __smem->num_regions; i++) {
80020bb6c9dSDeepak Kumar Singh area = &__smem->regions[i];
8016d361c1dSAlex Elder
80220bb6c9dSDeepak Kumar Singh if (addr_in_range(area->virt_base, area->size, p)) {
80320bb6c9dSDeepak Kumar Singh offset = p - area->virt_base;
8046d361c1dSAlex Elder
80520bb6c9dSDeepak Kumar Singh return (phys_addr_t)area->aux_base + offset;
8066d361c1dSAlex Elder }
8076d361c1dSAlex Elder }
8086d361c1dSAlex Elder
8096d361c1dSAlex Elder return 0;
8106d361c1dSAlex Elder }
81110615007SRobert Marko EXPORT_SYMBOL_GPL(qcom_smem_virt_to_phys);
8126d361c1dSAlex Elder
81317051d2cSRobert Marko /**
81417051d2cSRobert Marko * qcom_smem_get_soc_id() - return the SoC ID
81517051d2cSRobert Marko * @id: On success, we return the SoC ID here.
81617051d2cSRobert Marko *
81717051d2cSRobert Marko * Look up SoC ID from HW/SW build ID and return it.
81817051d2cSRobert Marko *
81917051d2cSRobert Marko * Return: 0 on success, negative errno on failure.
82017051d2cSRobert Marko */
int qcom_smem_get_soc_id(u32 *id)
{
	/* The HW/SW build ID item is provisioned by the boot chain */
	struct socinfo *info = qcom_smem_get(QCOM_SMEM_HOST_ANY,
					     SMEM_HW_SW_BUILD_ID, NULL);

	if (IS_ERR(info))
		return PTR_ERR(info);

	*id = __le32_to_cpu(info->id);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id);
83417051d2cSRobert Marko
qcom_smem_get_sbl_version(struct qcom_smem * smem)8354b638df4SBjorn Andersson static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
8364b638df4SBjorn Andersson {
837dcc0967dSChris Lew struct smem_header *header;
8389806884dSStephen Boyd __le32 *versions;
8394b638df4SBjorn Andersson
840dcc0967dSChris Lew header = smem->regions[0].virt_base;
841dcc0967dSChris Lew versions = header->version;
8424b638df4SBjorn Andersson
8439806884dSStephen Boyd return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
8444b638df4SBjorn Andersson }
8454b638df4SBjorn Andersson
qcom_smem_get_ptable(struct qcom_smem * smem)846d52e4048SChris Lew static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
8474b638df4SBjorn Andersson {
8484b638df4SBjorn Andersson struct smem_ptable *ptable;
849d52e4048SChris Lew u32 version;
8504b638df4SBjorn Andersson
85120bb6c9dSDeepak Kumar Singh ptable = smem->ptable;
8529806884dSStephen Boyd if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
853d52e4048SChris Lew return ERR_PTR(-ENOENT);
8544b638df4SBjorn Andersson
8559806884dSStephen Boyd version = le32_to_cpu(ptable->version);
8569806884dSStephen Boyd if (version != 1) {
8574b638df4SBjorn Andersson dev_err(smem->dev,
8589806884dSStephen Boyd "Unsupported partition header version %d\n", version);
859d52e4048SChris Lew return ERR_PTR(-EINVAL);
860d52e4048SChris Lew }
861d52e4048SChris Lew return ptable;
862d52e4048SChris Lew }
863d52e4048SChris Lew
qcom_smem_get_item_count(struct qcom_smem * smem)8645b394067SChris Lew static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
8655b394067SChris Lew {
8665b394067SChris Lew struct smem_ptable *ptable;
8675b394067SChris Lew struct smem_info *info;
8685b394067SChris Lew
8695b394067SChris Lew ptable = qcom_smem_get_ptable(smem);
8705b394067SChris Lew if (IS_ERR_OR_NULL(ptable))
8715b394067SChris Lew return SMEM_ITEM_COUNT;
8725b394067SChris Lew
8735b394067SChris Lew info = (struct smem_info *)&ptable->entry[ptable->num_entries];
8745b394067SChris Lew if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
8755b394067SChris Lew return SMEM_ITEM_COUNT;
8765b394067SChris Lew
8775b394067SChris Lew return le16_to_cpu(info->num_items);
8785b394067SChris Lew }
8795b394067SChris Lew
880ada79289SAlex Elder /*
881ada79289SAlex Elder * Validate the partition header for a partition whose partition
882ada79289SAlex Elder * table entry is supplied. Returns a pointer to its header if
883ada79289SAlex Elder * valid, or a null pointer otherwise.
884ada79289SAlex Elder */
static struct smem_partition_header *
qcom_smem_partition_header(struct qcom_smem *smem,
		struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
	struct smem_partition_header *header;
	u32 phys_addr;
	u32 size;

	/* Map the partition write-combined; offset is relative to region 0 */
	phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
	header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));

	if (!header)
		return NULL;

	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
		return NULL;
	}

	/* The in-partition header must agree with the ptable entry on both hosts */
	if (host0 != le16_to_cpu(header->host0)) {
		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
				host0, le16_to_cpu(header->host0));
		return NULL;
	}
	if (host1 != le16_to_cpu(header->host1)) {
		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
				host1, le16_to_cpu(header->host1));
		return NULL;
	}

	/* ... and on the partition size */
	size = le32_to_cpu(header->size);
	if (size != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "bad partition size (%u != %u)\n",
			size, le32_to_cpu(entry->size));
		return NULL;
	}

	/* The uncached free pointer must lie inside the partition */
	if (le32_to_cpu(header->offset_free_uncached) > size) {
		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
			le32_to_cpu(header->offset_free_uncached), size);
		return NULL;
	}

	return header;
}
930ada79289SAlex Elder
/*
 * qcom_smem_set_global_partition() - locate and map the global partition
 * @smem:	smem instance
 *
 * Scans the partition table for the entry whose host0 and host1 are both
 * SMEM_GLOBAL_HOST, validates its header and records the mapping in
 * smem->global_partition. Returns 0 on success or a negative errno.
 */
static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	bool found = false;
	int i;

	if (smem->global_partition.virt_base) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		/* Skip unpopulated entries */
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
			continue;

		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(smem->dev, "Missing entry for global partition\n");
		return -EINVAL;
	}

	header = qcom_smem_partition_header(smem, entry,
				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
	if (!header)
		return -EINVAL;

	/* Record the mapping; size/cacheline come from the ptable entry */
	smem->global_partition.virt_base = (void __iomem *)header;
	smem->global_partition.phys_base = smem->regions[0].aux_base +
								le32_to_cpu(entry->offset);
	smem->global_partition.size = le32_to_cpu(entry->size);
	smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}
982d52e4048SChris Lew
/*
 * qcom_smem_enumerate_partitions() - map every partition involving @local_host
 * @smem:	smem instance
 * @local_host:	host id of this processor (SMEM_HOST_APPS)
 *
 * Walks the partition table and, for each entry that pairs @local_host with
 * a remote host, validates the partition header and records the mapping in
 * smem->partitions[remote_host]. Returns 0 on success or a negative errno.
 */
static int
qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	u16 remote_host;
	u16 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		/* Skip unpopulated entries */
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		/* Only entries that name the local host are of interest */
		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);
		if (host0 == local_host)
			remote_host = host1;
		else if (host1 == local_host)
			remote_host = host0;
		else
			continue;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev, "bad host %u\n", remote_host);
			return -EINVAL;
		}

		/* Each remote host may appear in at most one partition */
		if (smem->partitions[remote_host].virt_base) {
			dev_err(smem->dev, "duplicate host %u\n", remote_host);
			return -EINVAL;
		}

		header = qcom_smem_partition_header(smem, entry, host0, host1);
		if (!header)
			return -EINVAL;

		smem->partitions[remote_host].virt_base = (void __iomem *)header;
		smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
								le32_to_cpu(entry->offset);
		smem->partitions[remote_host].size = le32_to_cpu(entry->size);
		smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
	}

	return 0;
}
10364b638df4SBjorn Andersson
qcom_smem_map_toc(struct qcom_smem * smem,struct smem_region * region)103720bb6c9dSDeepak Kumar Singh static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
103820bb6c9dSDeepak Kumar Singh {
103920bb6c9dSDeepak Kumar Singh u32 ptable_start;
104020bb6c9dSDeepak Kumar Singh
104120bb6c9dSDeepak Kumar Singh /* map starting 4K for smem header */
104220bb6c9dSDeepak Kumar Singh region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
104320bb6c9dSDeepak Kumar Singh ptable_start = region->aux_base + region->size - SZ_4K;
104420bb6c9dSDeepak Kumar Singh /* map last 4k for toc */
104520bb6c9dSDeepak Kumar Singh smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);
104620bb6c9dSDeepak Kumar Singh
104720bb6c9dSDeepak Kumar Singh if (!region->virt_base || !smem->ptable)
104820bb6c9dSDeepak Kumar Singh return -ENOMEM;
104920bb6c9dSDeepak Kumar Singh
105020bb6c9dSDeepak Kumar Singh return 0;
105120bb6c9dSDeepak Kumar Singh }
105220bb6c9dSDeepak Kumar Singh
/* Remap the first region to cover the whole legacy global heap of @size bytes. */
static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
{
	struct smem_region *region = &smem->regions[0];

	region->size = size;
	region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, size);
	if (!region->virt_base)
		return -ENOMEM;

	return 0;
}
106720bb6c9dSDeepak Kumar Singh
qcom_smem_resolve_mem(struct qcom_smem * smem,const char * name,struct smem_region * region)1068b5af64fcSBjorn Andersson static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
1069b5af64fcSBjorn Andersson struct smem_region *region)
10704b638df4SBjorn Andersson {
1071b5af64fcSBjorn Andersson struct device *dev = smem->dev;
1072d0bfd7c9SStephen Boyd struct device_node *np;
1073d0bfd7c9SStephen Boyd struct resource r;
1074d0bfd7c9SStephen Boyd int ret;
10754b638df4SBjorn Andersson
1076d0bfd7c9SStephen Boyd np = of_parse_phandle(dev->of_node, name, 0);
1077d0bfd7c9SStephen Boyd if (!np) {
1078d0bfd7c9SStephen Boyd dev_err(dev, "No %s specified\n", name);
1079d0bfd7c9SStephen Boyd return -EINVAL;
10804b638df4SBjorn Andersson }
10814b638df4SBjorn Andersson
1082d0bfd7c9SStephen Boyd ret = of_address_to_resource(np, 0, &r);
1083d0bfd7c9SStephen Boyd of_node_put(np);
1084d0bfd7c9SStephen Boyd if (ret)
1085d0bfd7c9SStephen Boyd return ret;
1086d0bfd7c9SStephen Boyd
1087b5af64fcSBjorn Andersson region->aux_base = r.start;
1088b5af64fcSBjorn Andersson region->size = resource_size(&r);
1089d0bfd7c9SStephen Boyd
1090d0bfd7c9SStephen Boyd return 0;
10914b638df4SBjorn Andersson }
10924b638df4SBjorn Andersson
qcom_smem_probe(struct platform_device * pdev)10934b638df4SBjorn Andersson static int qcom_smem_probe(struct platform_device *pdev)
10944b638df4SBjorn Andersson {
10954b638df4SBjorn Andersson struct smem_header *header;
1096b5af64fcSBjorn Andersson struct reserved_mem *rmem;
10974b638df4SBjorn Andersson struct qcom_smem *smem;
109820bb6c9dSDeepak Kumar Singh unsigned long flags;
1099d0bfd7c9SStephen Boyd int num_regions;
11004b638df4SBjorn Andersson int hwlock_id;
11014b638df4SBjorn Andersson u32 version;
110220bb6c9dSDeepak Kumar Singh u32 size;
11034b638df4SBjorn Andersson int ret;
1104b5af64fcSBjorn Andersson int i;
11054b638df4SBjorn Andersson
1106d0bfd7c9SStephen Boyd num_regions = 1;
11074a1b9f4eSRob Herring if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram"))
1108d0bfd7c9SStephen Boyd num_regions++;
11094b638df4SBjorn Andersson
1110cbdd13bfSChristophe JAILLET smem = devm_kzalloc(&pdev->dev, struct_size(smem, regions, num_regions),
1111cbdd13bfSChristophe JAILLET GFP_KERNEL);
11124b638df4SBjorn Andersson if (!smem)
11134b638df4SBjorn Andersson return -ENOMEM;
11144b638df4SBjorn Andersson
11154b638df4SBjorn Andersson smem->dev = &pdev->dev;
11164b638df4SBjorn Andersson smem->num_regions = num_regions;
11174b638df4SBjorn Andersson
1118b5af64fcSBjorn Andersson rmem = of_reserved_mem_lookup(pdev->dev.of_node);
1119b5af64fcSBjorn Andersson if (rmem) {
1120b5af64fcSBjorn Andersson smem->regions[0].aux_base = rmem->base;
1121b5af64fcSBjorn Andersson smem->regions[0].size = rmem->size;
1122b5af64fcSBjorn Andersson } else {
1123b5af64fcSBjorn Andersson /*
1124b5af64fcSBjorn Andersson * Fall back to the memory-region reference, if we're not a
1125b5af64fcSBjorn Andersson * reserved-memory node.
1126b5af64fcSBjorn Andersson */
1127b5af64fcSBjorn Andersson ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
11284b638df4SBjorn Andersson if (ret)
11294b638df4SBjorn Andersson return ret;
1130b5af64fcSBjorn Andersson }
11314b638df4SBjorn Andersson
1132b5af64fcSBjorn Andersson if (num_regions > 1) {
1133b5af64fcSBjorn Andersson ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
1134b5af64fcSBjorn Andersson if (ret)
1135d0bfd7c9SStephen Boyd return ret;
1136b5af64fcSBjorn Andersson }
1137b5af64fcSBjorn Andersson
113820bb6c9dSDeepak Kumar Singh
113920bb6c9dSDeepak Kumar Singh ret = qcom_smem_map_toc(smem, &smem->regions[0]);
114020bb6c9dSDeepak Kumar Singh if (ret)
114120bb6c9dSDeepak Kumar Singh return ret;
114220bb6c9dSDeepak Kumar Singh
114320bb6c9dSDeepak Kumar Singh for (i = 1; i < num_regions; i++) {
1144b5af64fcSBjorn Andersson smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
1145b5af64fcSBjorn Andersson smem->regions[i].aux_base,
1146b5af64fcSBjorn Andersson smem->regions[i].size);
1147b5af64fcSBjorn Andersson if (!smem->regions[i].virt_base) {
1148b5af64fcSBjorn Andersson dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
1149b5af64fcSBjorn Andersson return -ENOMEM;
1150b5af64fcSBjorn Andersson }
1151b5af64fcSBjorn Andersson }
11524b638df4SBjorn Andersson
11534b638df4SBjorn Andersson header = smem->regions[0].virt_base;
11549806884dSStephen Boyd if (le32_to_cpu(header->initialized) != 1 ||
11559806884dSStephen Boyd le32_to_cpu(header->reserved)) {
11564b638df4SBjorn Andersson dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
11574b638df4SBjorn Andersson return -EINVAL;
11584b638df4SBjorn Andersson }
11594b638df4SBjorn Andersson
116020bb6c9dSDeepak Kumar Singh hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
116120bb6c9dSDeepak Kumar Singh if (hwlock_id < 0) {
116220bb6c9dSDeepak Kumar Singh if (hwlock_id != -EPROBE_DEFER)
116320bb6c9dSDeepak Kumar Singh dev_err(&pdev->dev, "failed to retrieve hwlock\n");
116420bb6c9dSDeepak Kumar Singh return hwlock_id;
116520bb6c9dSDeepak Kumar Singh }
116620bb6c9dSDeepak Kumar Singh
116720bb6c9dSDeepak Kumar Singh smem->hwlock = hwspin_lock_request_specific(hwlock_id);
116820bb6c9dSDeepak Kumar Singh if (!smem->hwlock)
116920bb6c9dSDeepak Kumar Singh return -ENXIO;
117020bb6c9dSDeepak Kumar Singh
117120bb6c9dSDeepak Kumar Singh ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
117220bb6c9dSDeepak Kumar Singh if (ret)
117320bb6c9dSDeepak Kumar Singh return ret;
117420bb6c9dSDeepak Kumar Singh size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
117520bb6c9dSDeepak Kumar Singh hwspin_unlock_irqrestore(smem->hwlock, &flags);
117620bb6c9dSDeepak Kumar Singh
11774b638df4SBjorn Andersson version = qcom_smem_get_sbl_version(smem);
117820bb6c9dSDeepak Kumar Singh /*
117920bb6c9dSDeepak Kumar Singh * smem header mapping is required only in heap version scheme, so unmap
118020bb6c9dSDeepak Kumar Singh * it here. It will be remapped in qcom_smem_map_global() when whole
118120bb6c9dSDeepak Kumar Singh * partition is mapped again.
118220bb6c9dSDeepak Kumar Singh */
118320bb6c9dSDeepak Kumar Singh devm_iounmap(smem->dev, smem->regions[0].virt_base);
1184d52e4048SChris Lew switch (version >> 16) {
1185d52e4048SChris Lew case SMEM_GLOBAL_PART_VERSION:
1186d52e4048SChris Lew ret = qcom_smem_set_global_partition(smem);
1187d52e4048SChris Lew if (ret < 0)
1188d52e4048SChris Lew return ret;
11895b394067SChris Lew smem->item_count = qcom_smem_get_item_count(smem);
11905b394067SChris Lew break;
1191d52e4048SChris Lew case SMEM_GLOBAL_HEAP_VERSION:
119220bb6c9dSDeepak Kumar Singh qcom_smem_map_global(smem, size);
11935b394067SChris Lew smem->item_count = SMEM_ITEM_COUNT;
1194d52e4048SChris Lew break;
1195d52e4048SChris Lew default:
11964b638df4SBjorn Andersson dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
11974b638df4SBjorn Andersson return -EINVAL;
11984b638df4SBjorn Andersson }
11994b638df4SBjorn Andersson
120013a920aeSAlex Elder BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
12014b638df4SBjorn Andersson ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
1202d52e4048SChris Lew if (ret < 0 && ret != -ENOENT)
12034b638df4SBjorn Andersson return ret;
12044b638df4SBjorn Andersson
12054b638df4SBjorn Andersson __smem = smem;
12064b638df4SBjorn Andersson
1207efb448d0SImran Khan smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
1208efb448d0SImran Khan PLATFORM_DEVID_NONE, NULL,
1209efb448d0SImran Khan 0);
1210efb448d0SImran Khan if (IS_ERR(smem->socinfo))
1211efb448d0SImran Khan dev_dbg(&pdev->dev, "failed to register socinfo device\n");
1212efb448d0SImran Khan
12134b638df4SBjorn Andersson return 0;
12144b638df4SBjorn Andersson }
12154b638df4SBjorn Andersson
qcom_smem_remove(struct platform_device * pdev)12164b638df4SBjorn Andersson static int qcom_smem_remove(struct platform_device *pdev)
12174b638df4SBjorn Andersson {
1218efb448d0SImran Khan platform_device_unregister(__smem->socinfo);
1219efb448d0SImran Khan
12204b638df4SBjorn Andersson hwspin_lock_free(__smem->hwlock);
1221f8c67df7SStephen Boyd __smem = NULL;
12224b638df4SBjorn Andersson
12234b638df4SBjorn Andersson return 0;
12244b638df4SBjorn Andersson }
12254b638df4SBjorn Andersson
/*
 * Devicetree match table: this driver binds to the generic "qcom,smem"
 * node. Exported so module autoloading works from the DT compatible.
 */
static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
12314b638df4SBjorn Andersson
static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		/*
		 * Manual unbind via sysfs would yank shared memory out from
		 * under every consumer, so hide the bind/unbind attributes.
		 */
		.suppress_bind_attrs = true,
	},
};
12414b638df4SBjorn Andersson
/*
 * Module init: register the platform driver. Registered at arch_initcall
 * level — earlier than the default device/module initcalls — presumably so
 * SMEM is available before consumers (smd/glink/socinfo) probe; confirm
 * against the initcall ordering of those drivers.
 */
static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);
12474b638df4SBjorn Andersson
/* Module exit: unregister the driver, which triggers qcom_smem_remove(). */
static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit)
12534b638df4SBjorn Andersson
12544b638df4SBjorn Andersson MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
12554b638df4SBjorn Andersson MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
12564b638df4SBjorn Andersson MODULE_LICENSE("GPL v2");
1257