xref: /openbmc/linux/drivers/soc/qcom/smem.c (revision 4dbb9e2322a3a9c912ce796c20c27045ae8dae22)
197fb5e8dSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
24b638df4SBjorn Andersson /*
34b638df4SBjorn Andersson  * Copyright (c) 2015, Sony Mobile Communications AB.
44b638df4SBjorn Andersson  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
54b638df4SBjorn Andersson  */
64b638df4SBjorn Andersson 
74b638df4SBjorn Andersson #include <linux/hwspinlock.h>
84b638df4SBjorn Andersson #include <linux/io.h>
94b638df4SBjorn Andersson #include <linux/module.h>
104b638df4SBjorn Andersson #include <linux/of.h>
114b638df4SBjorn Andersson #include <linux/of_address.h>
12b5af64fcSBjorn Andersson #include <linux/of_reserved_mem.h>
134b638df4SBjorn Andersson #include <linux/platform_device.h>
149487e2abSNiklas Cassel #include <linux/sizes.h>
154b638df4SBjorn Andersson #include <linux/slab.h>
164b638df4SBjorn Andersson #include <linux/soc/qcom/smem.h>
1717051d2cSRobert Marko #include <linux/soc/qcom/socinfo.h>
184b638df4SBjorn Andersson 
194b638df4SBjorn Andersson /*
204b638df4SBjorn Andersson  * The Qualcomm shared memory system is an allocate-only heap structure that
214b638df4SBjorn Andersson  * consists of one or more memory areas that can be accessed by the processors
224b638df4SBjorn Andersson  * in the SoC.
234b638df4SBjorn Andersson  *
244b638df4SBjorn Andersson  * All systems contain a global heap, accessible by all processors in the SoC,
254b638df4SBjorn Andersson  * with a table of contents data structure (@smem_header) at the beginning of
264b638df4SBjorn Andersson  * the main shared memory block.
274b638df4SBjorn Andersson  *
284b638df4SBjorn Andersson  * The global header contains metadata for allocations as well as a fixed list
294b638df4SBjorn Andersson  * of 512 entries (@smem_global_entry) that can be initialized to reference
304b638df4SBjorn Andersson  * parts of the shared memory space.
314b638df4SBjorn Andersson  *
324b638df4SBjorn Andersson  *
334b638df4SBjorn Andersson  * In addition to this global heap a set of "private" heaps can be set up at
344b638df4SBjorn Andersson  * boot time with access restrictions so that only certain processor pairs can
354b638df4SBjorn Andersson  * access the data.
364b638df4SBjorn Andersson  *
374b638df4SBjorn Andersson  * These partitions are referenced from an optional partition table
384b638df4SBjorn Andersson  * (@smem_ptable), which is found 4kB from the end of the main smem region. The
394b638df4SBjorn Andersson  * partition table entries (@smem_ptable_entry) list the involved processors
404b638df4SBjorn Andersson  * (or hosts) and their location in the main shared memory region.
414b638df4SBjorn Andersson  *
424b638df4SBjorn Andersson  * Each partition starts with a header (@smem_partition_header) that identifies
434b638df4SBjorn Andersson  * the partition and holds properties for the two internal memory regions. The
444b638df4SBjorn Andersson  * two regions are cached and non-cached memory respectively. Each region
454b638df4SBjorn Andersson  * contains a linked list of allocation headers (@smem_private_entry) followed by
464b638df4SBjorn Andersson  * their data.
474b638df4SBjorn Andersson  *
484b638df4SBjorn Andersson  * Items in the non-cached region are allocated from the start of the partition
494b638df4SBjorn Andersson  * while items in the cached region are allocated from the end. The free area
50c7c1dc35SBjorn Andersson  * is hence the region between the cached and non-cached offsets. The header of
51c7c1dc35SBjorn Andersson  * cached items comes after the data.
524b638df4SBjorn Andersson  *
53d52e4048SChris Lew  * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
54d52e4048SChris Lew  * for the global heap. A new global partition is created from the global heap
55d52e4048SChris Lew  * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
56d52e4048SChris Lew  * set by the bootloader.
574b638df4SBjorn Andersson  *
584b638df4SBjorn Andersson  * To synchronize allocations in the shared memory heaps, a remote spinlock must
594b638df4SBjorn Andersson  * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
604b638df4SBjorn Andersson  * platforms.
614b638df4SBjorn Andersson  *
624b638df4SBjorn Andersson  */
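
/*
 * Illustrative layout of a single partition (a sketch of the description
 * above, not an exact byte map):
 *
 *	+-------------------------------+  <- start of partition
 *	| struct smem_partition_header  |
 *	+-------------------------------+
 *	| entry hdr | item data         |  uncached items, allocated towards
 *	| entry hdr | item data         |  higher offsets
 *	+-------------------------------+  <- offset_free_uncached
 *	|          free space           |
 *	+-------------------------------+  <- offset_free_cached
 *	| item data | entry hdr         |  cached items, allocated towards
 *	| item data | entry hdr         |  lower offsets
 *	+-------------------------------+  <- end of partition
 */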
634b638df4SBjorn Andersson 
644b638df4SBjorn Andersson /*
65dcc0967dSChris Lew  * The version member of the smem header contains an array of versions for the
66dcc0967dSChris Lew  * various software components in the SoC. We verify that the boot loader
67dcc0967dSChris Lew  * version is a valid version as a sanity check.
684b638df4SBjorn Andersson  */
694b638df4SBjorn Andersson #define SMEM_MASTER_SBL_VERSION_INDEX	7
70d52e4048SChris Lew #define SMEM_GLOBAL_HEAP_VERSION	11
71d52e4048SChris Lew #define SMEM_GLOBAL_PART_VERSION	12
724b638df4SBjorn Andersson 
734b638df4SBjorn Andersson /*
744b638df4SBjorn Andersson  * The first 8 items are only to be allocated by the boot loader while
754b638df4SBjorn Andersson  * initializing the heap.
764b638df4SBjorn Andersson  */
774b638df4SBjorn Andersson #define SMEM_ITEM_LAST_FIXED	8
784b638df4SBjorn Andersson 
794b638df4SBjorn Andersson /* Highest accepted item number, for both global and private heaps */
804b638df4SBjorn Andersson #define SMEM_ITEM_COUNT		512
814b638df4SBjorn Andersson 
824b638df4SBjorn Andersson /* Processor/host identifier for the application processor */
834b638df4SBjorn Andersson #define SMEM_HOST_APPS		0
844b638df4SBjorn Andersson 
85d52e4048SChris Lew /* Processor/host identifier for the global partition */
86d52e4048SChris Lew #define SMEM_GLOBAL_HOST	0xfffe
87d52e4048SChris Lew 
884b638df4SBjorn Andersson /* Max number of processors/hosts in a system */
89b302c64aSBartosz Golaszewski #define SMEM_HOST_COUNT		20
904b638df4SBjorn Andersson 
914b638df4SBjorn Andersson /**
924b638df4SBjorn Andersson   * struct smem_proc_comm - proc_comm communication struct (legacy)
934b638df4SBjorn Andersson   * @command:	current command to be executed
944b638df4SBjorn Andersson   * @status:	status of the currently requested command
954b638df4SBjorn Andersson   * @params:	parameters to the command
964b638df4SBjorn Andersson   */
974b638df4SBjorn Andersson struct smem_proc_comm {
989806884dSStephen Boyd 	__le32 command;
999806884dSStephen Boyd 	__le32 status;
1009806884dSStephen Boyd 	__le32 params[2];
1014b638df4SBjorn Andersson };
1024b638df4SBjorn Andersson 
1034b638df4SBjorn Andersson /**
1044b638df4SBjorn Andersson  * struct smem_global_entry - entry to reference smem items on the heap
1054b638df4SBjorn Andersson  * @allocated:	boolean to indicate if this entry is used
1064b638df4SBjorn Andersson  * @offset:	offset to the allocated space
1074b638df4SBjorn Andersson  * @size:	size of the allocated space, 8 byte aligned
1084b638df4SBjorn Andersson  * @aux_base:	base address for the memory region used by this unit, or 0 for
1094b638df4SBjorn Andersson  *		the default region. bits 0,1 are reserved
1104b638df4SBjorn Andersson  */
1114b638df4SBjorn Andersson struct smem_global_entry {
1129806884dSStephen Boyd 	__le32 allocated;
1139806884dSStephen Boyd 	__le32 offset;
1149806884dSStephen Boyd 	__le32 size;
1159806884dSStephen Boyd 	__le32 aux_base; /* bits 1:0 reserved */
1164b638df4SBjorn Andersson };
1174b638df4SBjorn Andersson #define AUX_BASE_MASK		0xfffffffc
1184b638df4SBjorn Andersson 
1194b638df4SBjorn Andersson /**
1204b638df4SBjorn Andersson  * struct smem_header - header found in beginning of primary smem region
1214b638df4SBjorn Andersson  * @proc_comm:		proc_comm communication interface (legacy)
1224b638df4SBjorn Andersson  * @version:		array of versions for the various subsystems
1234b638df4SBjorn Andersson  * @initialized:	boolean to indicate that smem is initialized
1244b638df4SBjorn Andersson  * @free_offset:	index of the first unallocated byte in smem
1254b638df4SBjorn Andersson  * @available:		number of bytes available for allocation
1264b638df4SBjorn Andersson  * @reserved:		reserved field, must be 0
127fc3699c6SLee Jones  * @toc:		array of references to items
1284b638df4SBjorn Andersson  */
1294b638df4SBjorn Andersson struct smem_header {
1304b638df4SBjorn Andersson 	struct smem_proc_comm proc_comm[4];
1319806884dSStephen Boyd 	__le32 version[32];
1329806884dSStephen Boyd 	__le32 initialized;
1339806884dSStephen Boyd 	__le32 free_offset;
1349806884dSStephen Boyd 	__le32 available;
1359806884dSStephen Boyd 	__le32 reserved;
1364b638df4SBjorn Andersson 	struct smem_global_entry toc[SMEM_ITEM_COUNT];
1374b638df4SBjorn Andersson };
1384b638df4SBjorn Andersson 
1394b638df4SBjorn Andersson /**
1404b638df4SBjorn Andersson  * struct smem_ptable_entry - one entry in the @smem_ptable list
1414b638df4SBjorn Andersson  * @offset:	offset, within the main shared memory region, of the partition
1424b638df4SBjorn Andersson  * @size:	size of the partition
1434b638df4SBjorn Andersson  * @flags:	flags for the partition (currently unused)
1444b638df4SBjorn Andersson  * @host0:	first processor/host with access to this partition
1454b638df4SBjorn Andersson  * @host1:	second processor/host with access to this partition
146c7c1dc35SBjorn Andersson  * @cacheline:	alignment for "cached" entries
1474b638df4SBjorn Andersson  * @reserved:	reserved entries for later use
1484b638df4SBjorn Andersson  */
1494b638df4SBjorn Andersson struct smem_ptable_entry {
1509806884dSStephen Boyd 	__le32 offset;
1519806884dSStephen Boyd 	__le32 size;
1529806884dSStephen Boyd 	__le32 flags;
1539806884dSStephen Boyd 	__le16 host0;
1549806884dSStephen Boyd 	__le16 host1;
155c7c1dc35SBjorn Andersson 	__le32 cacheline;
156c7c1dc35SBjorn Andersson 	__le32 reserved[7];
1574b638df4SBjorn Andersson };
1584b638df4SBjorn Andersson 
1594b638df4SBjorn Andersson /**
1604b638df4SBjorn Andersson  * struct smem_ptable - partition table for the private partitions
1614b638df4SBjorn Andersson  * @magic:	magic number, must be SMEM_PTABLE_MAGIC
1624b638df4SBjorn Andersson  * @version:	version of the partition table
1634b638df4SBjorn Andersson  * @num_entries: number of partitions in the table
1644b638df4SBjorn Andersson  * @reserved:	for now reserved entries
1654b638df4SBjorn Andersson  * @entry:	list of @smem_ptable_entry for the @num_entries partitions
1664b638df4SBjorn Andersson  */
1674b638df4SBjorn Andersson struct smem_ptable {
1689806884dSStephen Boyd 	u8 magic[4];
1699806884dSStephen Boyd 	__le32 version;
1709806884dSStephen Boyd 	__le32 num_entries;
1719806884dSStephen Boyd 	__le32 reserved[5];
1724b638df4SBjorn Andersson 	struct smem_ptable_entry entry[];
1734b638df4SBjorn Andersson };
1749806884dSStephen Boyd 
1759806884dSStephen Boyd static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
1764b638df4SBjorn Andersson 
1774b638df4SBjorn Andersson /**
1784b638df4SBjorn Andersson  * struct smem_partition_header - header of the partitions
1794b638df4SBjorn Andersson  * @magic:	magic number, must be SMEM_PART_MAGIC
1804b638df4SBjorn Andersson  * @host0:	first processor/host with access to this partition
1814b638df4SBjorn Andersson  * @host1:	second processor/host with access to this partition
1824b638df4SBjorn Andersson  * @size:	size of the partition
1834b638df4SBjorn Andersson  * @offset_free_uncached: offset to the first free byte of uncached memory in
1844b638df4SBjorn Andersson  *		this partition
1854b638df4SBjorn Andersson  * @offset_free_cached: offset to the first free byte of cached memory in this
1864b638df4SBjorn Andersson  *		partition
1874b638df4SBjorn Andersson  * @reserved:	for now reserved entries
1884b638df4SBjorn Andersson  */
1894b638df4SBjorn Andersson struct smem_partition_header {
1909806884dSStephen Boyd 	u8 magic[4];
1919806884dSStephen Boyd 	__le16 host0;
1929806884dSStephen Boyd 	__le16 host1;
1939806884dSStephen Boyd 	__le32 size;
1949806884dSStephen Boyd 	__le32 offset_free_uncached;
1959806884dSStephen Boyd 	__le32 offset_free_cached;
1969806884dSStephen Boyd 	__le32 reserved[3];
1974b638df4SBjorn Andersson };
1989806884dSStephen Boyd 
19920bb6c9dSDeepak Kumar Singh /**
20020bb6c9dSDeepak Kumar Singh  * struct smem_partition - describes smem partition
20120bb6c9dSDeepak Kumar Singh  * @virt_base:	starting virtual address of partition
20220bb6c9dSDeepak Kumar Singh  * @phys_base:	starting physical address of partition
20320bb6c9dSDeepak Kumar Singh  * @cacheline:	alignment for "cached" entries
20420bb6c9dSDeepak Kumar Singh  * @size:	size of partition
20520bb6c9dSDeepak Kumar Singh  */
20620bb6c9dSDeepak Kumar Singh struct smem_partition {
20720bb6c9dSDeepak Kumar Singh 	void __iomem *virt_base;
20820bb6c9dSDeepak Kumar Singh 	phys_addr_t phys_base;
20920bb6c9dSDeepak Kumar Singh 	size_t cacheline;
21020bb6c9dSDeepak Kumar Singh 	size_t size;
21120bb6c9dSDeepak Kumar Singh };
21220bb6c9dSDeepak Kumar Singh 
2139806884dSStephen Boyd static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
2144b638df4SBjorn Andersson 
2154b638df4SBjorn Andersson /**
2164b638df4SBjorn Andersson  * struct smem_private_entry - header of each item in the private partition
2174b638df4SBjorn Andersson  * @canary:	magic number, must be SMEM_PRIVATE_CANARY
2184b638df4SBjorn Andersson  * @item:	identifying number of the smem item
2194b638df4SBjorn Andersson  * @size:	size of the data, including padding bytes
2204b638df4SBjorn Andersson  * @padding_data: number of bytes of padding of data
2214b638df4SBjorn Andersson  * @padding_hdr: number of bytes of padding between the header and the data
2224b638df4SBjorn Andersson  * @reserved:	for now reserved entry
2234b638df4SBjorn Andersson  */
2244b638df4SBjorn Andersson struct smem_private_entry {
2259806884dSStephen Boyd 	u16 canary; /* bytes are the same so no swapping needed */
2269806884dSStephen Boyd 	__le16 item;
2279806884dSStephen Boyd 	__le32 size; /* includes padding bytes */
2289806884dSStephen Boyd 	__le16 padding_data;
2299806884dSStephen Boyd 	__le16 padding_hdr;
2309806884dSStephen Boyd 	__le32 reserved;
2314b638df4SBjorn Andersson };
2324b638df4SBjorn Andersson #define SMEM_PRIVATE_CANARY	0xa5a5
2334b638df4SBjorn Andersson 
2344b638df4SBjorn Andersson /**
2355b394067SChris Lew  * struct smem_info - smem region info located after the table of contents
2365b394067SChris Lew  * @magic:	magic number, must be SMEM_INFO_MAGIC
2375b394067SChris Lew  * @size:	size of the smem region
2385b394067SChris Lew  * @base_addr:	base address of the smem region
2395b394067SChris Lew  * @reserved:	for now reserved entry
2405b394067SChris Lew  * @num_items:	highest accepted item number
2415b394067SChris Lew  */
2425b394067SChris Lew struct smem_info {
2435b394067SChris Lew 	u8 magic[4];
2445b394067SChris Lew 	__le32 size;
2455b394067SChris Lew 	__le32 base_addr;
2465b394067SChris Lew 	__le32 reserved;
2475b394067SChris Lew 	__le16 num_items;
2485b394067SChris Lew };
2495b394067SChris Lew 
2505b394067SChris Lew static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
2515b394067SChris Lew 
2525b394067SChris Lew /**
2534b638df4SBjorn Andersson  * struct smem_region - representation of a chunk of memory used for smem
2544b638df4SBjorn Andersson  * @aux_base:	identifier of aux_mem base
2554b638df4SBjorn Andersson  * @virt_base:	virtual base address of memory with this aux_mem identifier
2564b638df4SBjorn Andersson  * @size:	size of the memory region
2574b638df4SBjorn Andersson  */
2584b638df4SBjorn Andersson struct smem_region {
259b5af64fcSBjorn Andersson 	phys_addr_t aux_base;
2604b638df4SBjorn Andersson 	void __iomem *virt_base;
2614b638df4SBjorn Andersson 	size_t size;
2624b638df4SBjorn Andersson };
2634b638df4SBjorn Andersson 
2644b638df4SBjorn Andersson /**
2654b638df4SBjorn Andersson  * struct qcom_smem - device data for the smem device
2664b638df4SBjorn Andersson  * @dev:	device pointer
2674b638df4SBjorn Andersson  * @hwlock:	reference to a hwspinlock
26820bb6c9dSDeepak Kumar Singh  * @ptable: virtual base of partition table
26920bb6c9dSDeepak Kumar Singh  * @global_partition: describes the global partition when in use
27020bb6c9dSDeepak Kumar Singh  * @partitions: list of partitions of current processor/host
2715b394067SChris Lew  * @item_count: max accepted item number
272fc3699c6SLee Jones  * @socinfo:	platform device pointer
2734b638df4SBjorn Andersson  * @num_regions: number of @regions
2744b638df4SBjorn Andersson  * @regions:	list of the memory regions defining the shared memory
2754b638df4SBjorn Andersson  */
2764b638df4SBjorn Andersson struct qcom_smem {
2774b638df4SBjorn Andersson 	struct device *dev;
2784b638df4SBjorn Andersson 
2794b638df4SBjorn Andersson 	struct hwspinlock *hwlock;
2804b638df4SBjorn Andersson 
2815b394067SChris Lew 	u32 item_count;
282efb448d0SImran Khan 	struct platform_device *socinfo;
28320bb6c9dSDeepak Kumar Singh 	struct smem_ptable *ptable;
28420bb6c9dSDeepak Kumar Singh 	struct smem_partition global_partition;
28520bb6c9dSDeepak Kumar Singh 	struct smem_partition partitions[SMEM_HOST_COUNT];
2864b638df4SBjorn Andersson 
2874b638df4SBjorn Andersson 	unsigned num_regions;
2889f01b7a8SAlex Elder 	struct smem_region regions[];
2894b638df4SBjorn Andersson };
2904b638df4SBjorn Andersson 
291e221a1daSAlex Elder static void *
29201f14154SBjorn Andersson phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
2939806884dSStephen Boyd {
2949806884dSStephen Boyd 	void *p = phdr;
2959806884dSStephen Boyd 
2969806884dSStephen Boyd 	return p + le32_to_cpu(phdr->offset_free_uncached);
2979806884dSStephen Boyd }
2989806884dSStephen Boyd 
299e221a1daSAlex Elder static struct smem_private_entry *
300e221a1daSAlex Elder phdr_to_first_cached_entry(struct smem_partition_header *phdr,
301c7c1dc35SBjorn Andersson 					size_t cacheline)
302c7c1dc35SBjorn Andersson {
303c7c1dc35SBjorn Andersson 	void *p = phdr;
30470708749SAlex Elder 	struct smem_private_entry *e;
305c7c1dc35SBjorn Andersson 
30670708749SAlex Elder 	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
307c7c1dc35SBjorn Andersson }
308c7c1dc35SBjorn Andersson 
309e221a1daSAlex Elder static void *
310e221a1daSAlex Elder phdr_to_last_cached_entry(struct smem_partition_header *phdr)
3119806884dSStephen Boyd {
3129806884dSStephen Boyd 	void *p = phdr;
3139806884dSStephen Boyd 
3149806884dSStephen Boyd 	return p + le32_to_cpu(phdr->offset_free_cached);
3159806884dSStephen Boyd }
3169806884dSStephen Boyd 
3179806884dSStephen Boyd static struct smem_private_entry *
31801f14154SBjorn Andersson phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
3199806884dSStephen Boyd {
3209806884dSStephen Boyd 	void *p = phdr;
3219806884dSStephen Boyd 
3229806884dSStephen Boyd 	return p + sizeof(*phdr);
3239806884dSStephen Boyd }
3249806884dSStephen Boyd 
3259806884dSStephen Boyd static struct smem_private_entry *
32601f14154SBjorn Andersson uncached_entry_next(struct smem_private_entry *e)
3279806884dSStephen Boyd {
3289806884dSStephen Boyd 	void *p = e;
3299806884dSStephen Boyd 
3309806884dSStephen Boyd 	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
3319806884dSStephen Boyd 	       le32_to_cpu(e->size);
3329806884dSStephen Boyd }
3339806884dSStephen Boyd 
334c7c1dc35SBjorn Andersson static struct smem_private_entry *
335c7c1dc35SBjorn Andersson cached_entry_next(struct smem_private_entry *e, size_t cacheline)
336c7c1dc35SBjorn Andersson {
337c7c1dc35SBjorn Andersson 	void *p = e;
338c7c1dc35SBjorn Andersson 
339c7c1dc35SBjorn Andersson 	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
340c7c1dc35SBjorn Andersson }
341c7c1dc35SBjorn Andersson 
34201f14154SBjorn Andersson static void *uncached_entry_to_item(struct smem_private_entry *e)
3439806884dSStephen Boyd {
3449806884dSStephen Boyd 	void *p = e;
3459806884dSStephen Boyd 
3469806884dSStephen Boyd 	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
3479806884dSStephen Boyd }
3489806884dSStephen Boyd 
349c7c1dc35SBjorn Andersson static void *cached_entry_to_item(struct smem_private_entry *e)
350c7c1dc35SBjorn Andersson {
351c7c1dc35SBjorn Andersson 	void *p = e;
352c7c1dc35SBjorn Andersson 
353c7c1dc35SBjorn Andersson 	return p - le32_to_cpu(e->size);
354c7c1dc35SBjorn Andersson }
355c7c1dc35SBjorn Andersson 
3564b638df4SBjorn Andersson /* Pointer to the one and only smem handle */
3574b638df4SBjorn Andersson static struct qcom_smem *__smem;
3584b638df4SBjorn Andersson 
3594b638df4SBjorn Andersson /* Timeout (ms) for the trylock of remote spinlocks */
3604b638df4SBjorn Andersson #define HWSPINLOCK_TIMEOUT	1000
3614b638df4SBjorn Andersson 
362*4dbb9e23SStephan Gerhold /**
363*4dbb9e23SStephan Gerhold  * qcom_smem_is_available() - Check if SMEM is available
364*4dbb9e23SStephan Gerhold  *
365*4dbb9e23SStephan Gerhold  * Return: true if SMEM is available, false otherwise.
366*4dbb9e23SStephan Gerhold  */
367*4dbb9e23SStephan Gerhold bool qcom_smem_is_available(void)
368*4dbb9e23SStephan Gerhold {
369*4dbb9e23SStephan Gerhold 	return !!__smem;
370*4dbb9e23SStephan Gerhold }
371*4dbb9e23SStephan Gerhold EXPORT_SYMBOL(qcom_smem_is_available);
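
/*
 * Usage sketch (illustrative, not part of this driver): a caller that only
 * needs to know whether SMEM has probed yet can defer on its own, e.g.:
 *
 *	if (!qcom_smem_is_available())
 *		return -EPROBE_DEFER;
 */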
372*4dbb9e23SStephan Gerhold 
3734b638df4SBjorn Andersson static int qcom_smem_alloc_private(struct qcom_smem *smem,
37420bb6c9dSDeepak Kumar Singh 				   struct smem_partition *part,
3754b638df4SBjorn Andersson 				   unsigned item,
3764b638df4SBjorn Andersson 				   size_t size)
3774b638df4SBjorn Andersson {
3789806884dSStephen Boyd 	struct smem_private_entry *hdr, *end;
37920bb6c9dSDeepak Kumar Singh 	struct smem_partition_header *phdr;
3804b638df4SBjorn Andersson 	size_t alloc_size;
3819806884dSStephen Boyd 	void *cached;
382f1383348SDeepak Kumar Singh 	void *p_end;
3834b638df4SBjorn Andersson 
38420bb6c9dSDeepak Kumar Singh 	phdr = (struct smem_partition_header __force *)part->virt_base;
385f1383348SDeepak Kumar Singh 	p_end = (void *)phdr + part->size;
38620bb6c9dSDeepak Kumar Singh 
38701f14154SBjorn Andersson 	hdr = phdr_to_first_uncached_entry(phdr);
38801f14154SBjorn Andersson 	end = phdr_to_last_uncached_entry(phdr);
38901f14154SBjorn Andersson 	cached = phdr_to_last_cached_entry(phdr);
3904b638df4SBjorn Andersson 
391f1383348SDeepak Kumar Singh 	if (WARN_ON((void *)end > p_end || cached > p_end))
392f1383348SDeepak Kumar Singh 		return -EINVAL;
393f1383348SDeepak Kumar Singh 
3949806884dSStephen Boyd 	while (hdr < end) {
39504a512feSAlex Elder 		if (hdr->canary != SMEM_PRIVATE_CANARY)
39604a512feSAlex Elder 			goto bad_canary;
3979806884dSStephen Boyd 		if (le16_to_cpu(hdr->item) == item)
3984b638df4SBjorn Andersson 			return -EEXIST;
3994b638df4SBjorn Andersson 
40001f14154SBjorn Andersson 		hdr = uncached_entry_next(hdr);
4014b638df4SBjorn Andersson 	}
4024b638df4SBjorn Andersson 
403f1383348SDeepak Kumar Singh 	if (WARN_ON((void *)hdr > p_end))
404f1383348SDeepak Kumar Singh 		return -EINVAL;
405f1383348SDeepak Kumar Singh 
4064b638df4SBjorn Andersson 	/* Check that we don't grow into the cached region */
4074b638df4SBjorn Andersson 	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
4088377f818SAlex Elder 	if ((void *)hdr + alloc_size > cached) {
4094b638df4SBjorn Andersson 		dev_err(smem->dev, "Out of memory\n");
4104b638df4SBjorn Andersson 		return -ENOSPC;
4114b638df4SBjorn Andersson 	}
4124b638df4SBjorn Andersson 
4134b638df4SBjorn Andersson 	hdr->canary = SMEM_PRIVATE_CANARY;
4149806884dSStephen Boyd 	hdr->item = cpu_to_le16(item);
4159806884dSStephen Boyd 	hdr->size = cpu_to_le32(ALIGN(size, 8));
4169806884dSStephen Boyd 	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
4174b638df4SBjorn Andersson 	hdr->padding_hdr = 0;
4184b638df4SBjorn Andersson 
4194b638df4SBjorn Andersson 	/*
4204b638df4SBjorn Andersson 	 * Ensure the header is written before we advance the free offset, so
4214b638df4SBjorn Andersson 	 * that remote processors that do not take the remote spinlock still
4224b638df4SBjorn Andersson 	 * get a consistent view of the linked list.
4234b638df4SBjorn Andersson 	 */
4244b638df4SBjorn Andersson 	wmb();
4259806884dSStephen Boyd 	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
4264b638df4SBjorn Andersson 
4274b638df4SBjorn Andersson 	return 0;
42804a512feSAlex Elder bad_canary:
42904a512feSAlex Elder 	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
43004a512feSAlex Elder 		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
43104a512feSAlex Elder 
43204a512feSAlex Elder 	return -EINVAL;
4334b638df4SBjorn Andersson }
4344b638df4SBjorn Andersson 
4354b638df4SBjorn Andersson static int qcom_smem_alloc_global(struct qcom_smem *smem,
4364b638df4SBjorn Andersson 				  unsigned item,
4374b638df4SBjorn Andersson 				  size_t size)
4384b638df4SBjorn Andersson {
4394b638df4SBjorn Andersson 	struct smem_global_entry *entry;
440d52e4048SChris Lew 	struct smem_header *header;
4414b638df4SBjorn Andersson 
4424b638df4SBjorn Andersson 	header = smem->regions[0].virt_base;
4434b638df4SBjorn Andersson 	entry = &header->toc[item];
4444b638df4SBjorn Andersson 	if (entry->allocated)
4454b638df4SBjorn Andersson 		return -EEXIST;
4464b638df4SBjorn Andersson 
4474b638df4SBjorn Andersson 	size = ALIGN(size, 8);
4489806884dSStephen Boyd 	if (WARN_ON(size > le32_to_cpu(header->available)))
4494b638df4SBjorn Andersson 		return -ENOMEM;
4504b638df4SBjorn Andersson 
4514b638df4SBjorn Andersson 	entry->offset = header->free_offset;
4529806884dSStephen Boyd 	entry->size = cpu_to_le32(size);
4534b638df4SBjorn Andersson 
4544b638df4SBjorn Andersson 	/*
4554b638df4SBjorn Andersson 	 * Ensure the header is consistent before we mark the item allocated,
4564b638df4SBjorn Andersson 	 * so that remote processors will get a consistent view of the item
4574b638df4SBjorn Andersson 	 * even though they do not take the spinlock on read.
4584b638df4SBjorn Andersson 	 */
4594b638df4SBjorn Andersson 	wmb();
4609806884dSStephen Boyd 	entry->allocated = cpu_to_le32(1);
4614b638df4SBjorn Andersson 
4629806884dSStephen Boyd 	le32_add_cpu(&header->free_offset, size);
4639806884dSStephen Boyd 	le32_add_cpu(&header->available, -size);
4644b638df4SBjorn Andersson 
4654b638df4SBjorn Andersson 	return 0;
4664b638df4SBjorn Andersson }
4674b638df4SBjorn Andersson 
4684b638df4SBjorn Andersson /**
4694b638df4SBjorn Andersson  * qcom_smem_alloc() - allocate space for a smem item
4704b638df4SBjorn Andersson  * @host:	remote processor id, or -1
4714b638df4SBjorn Andersson  * @item:	smem item handle
4724b638df4SBjorn Andersson  * @size:	number of bytes to be allocated
4734b638df4SBjorn Andersson  *
4744b638df4SBjorn Andersson  * Allocate space for a given smem item of size @size, given that the item is
4754b638df4SBjorn Andersson  * not yet allocated.
4764b638df4SBjorn Andersson  */
4774b638df4SBjorn Andersson int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
4784b638df4SBjorn Andersson {
47920bb6c9dSDeepak Kumar Singh 	struct smem_partition *part;
4804b638df4SBjorn Andersson 	unsigned long flags;
4814b638df4SBjorn Andersson 	int ret;
4824b638df4SBjorn Andersson 
4834b638df4SBjorn Andersson 	if (!__smem)
4844b638df4SBjorn Andersson 		return -EPROBE_DEFER;
4854b638df4SBjorn Andersson 
4864b638df4SBjorn Andersson 	if (item < SMEM_ITEM_LAST_FIXED) {
4874b638df4SBjorn Andersson 		dev_err(__smem->dev,
4884b638df4SBjorn Andersson 			"Rejecting allocation of static entry %d\n", item);
4894b638df4SBjorn Andersson 		return -EINVAL;
4904b638df4SBjorn Andersson 	}
4914b638df4SBjorn Andersson 
4925b394067SChris Lew 	if (WARN_ON(item >= __smem->item_count))
4935b394067SChris Lew 		return -EINVAL;
4945b394067SChris Lew 
4954b638df4SBjorn Andersson 	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
4964b638df4SBjorn Andersson 					  HWSPINLOCK_TIMEOUT,
4974b638df4SBjorn Andersson 					  &flags);
4984b638df4SBjorn Andersson 	if (ret)
4994b638df4SBjorn Andersson 		return ret;
5004b638df4SBjorn Andersson 
50120bb6c9dSDeepak Kumar Singh 	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
50220bb6c9dSDeepak Kumar Singh 		part = &__smem->partitions[host];
50320bb6c9dSDeepak Kumar Singh 		ret = qcom_smem_alloc_private(__smem, part, item, size);
50420bb6c9dSDeepak Kumar Singh 	} else if (__smem->global_partition.virt_base) {
50520bb6c9dSDeepak Kumar Singh 		part = &__smem->global_partition;
50620bb6c9dSDeepak Kumar Singh 		ret = qcom_smem_alloc_private(__smem, part, item, size);
507d52e4048SChris Lew 	} else {
5084b638df4SBjorn Andersson 		ret = qcom_smem_alloc_global(__smem, item, size);
509d52e4048SChris Lew 	}
5104b638df4SBjorn Andersson 
5114b638df4SBjorn Andersson 	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
5124b638df4SBjorn Andersson 
5134b638df4SBjorn Andersson 	return ret;
5144b638df4SBjorn Andersson }
51510615007SRobert Marko EXPORT_SYMBOL_GPL(qcom_smem_alloc);
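
/*
 * Usage sketch (illustrative, not part of this driver): a client typically
 * allocates its item once and treats -EEXIST as success, since the item may
 * already have been created by another processor or an earlier boot stage.
 * MY_SMEM_ITEM and struct my_state are made-up names for the example:
 *
 *	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, MY_SMEM_ITEM,
 *			      sizeof(struct my_state));
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 */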
5164b638df4SBjorn Andersson 
5171a03964dSStephen Boyd static void *qcom_smem_get_global(struct qcom_smem *smem,
5184b638df4SBjorn Andersson 				  unsigned item,
5194b638df4SBjorn Andersson 				  size_t *size)
5204b638df4SBjorn Andersson {
5214b638df4SBjorn Andersson 	struct smem_header *header;
5229f01b7a8SAlex Elder 	struct smem_region *region;
5234b638df4SBjorn Andersson 	struct smem_global_entry *entry;
524f1383348SDeepak Kumar Singh 	u64 entry_offset;
525f1383348SDeepak Kumar Singh 	u32 e_size;
5264b638df4SBjorn Andersson 	u32 aux_base;
5274b638df4SBjorn Andersson 	unsigned i;
5284b638df4SBjorn Andersson 
5294b638df4SBjorn Andersson 	header = smem->regions[0].virt_base;
5304b638df4SBjorn Andersson 	entry = &header->toc[item];
5314b638df4SBjorn Andersson 	if (!entry->allocated)
5321a03964dSStephen Boyd 		return ERR_PTR(-ENXIO);
5334b638df4SBjorn Andersson 
5349806884dSStephen Boyd 	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
5354b638df4SBjorn Andersson 
5364b638df4SBjorn Andersson 	for (i = 0; i < smem->num_regions; i++) {
5379f01b7a8SAlex Elder 		region = &smem->regions[i];
5384b638df4SBjorn Andersson 
539b5af64fcSBjorn Andersson 		if ((u32)region->aux_base == aux_base || !aux_base) {
540f1383348SDeepak Kumar Singh 			e_size = le32_to_cpu(entry->size);
541f1383348SDeepak Kumar Singh 			entry_offset = le32_to_cpu(entry->offset);
542f1383348SDeepak Kumar Singh 
543f1383348SDeepak Kumar Singh 			if (WARN_ON(e_size + entry_offset > region->size))
544f1383348SDeepak Kumar Singh 				return ERR_PTR(-EINVAL);
545f1383348SDeepak Kumar Singh 
5464b638df4SBjorn Andersson 			if (size != NULL)
547f1383348SDeepak Kumar Singh 				*size = e_size;
548f1383348SDeepak Kumar Singh 
549f1383348SDeepak Kumar Singh 			return region->virt_base + entry_offset;
5501a03964dSStephen Boyd 		}
5514b638df4SBjorn Andersson 	}
5524b638df4SBjorn Andersson 
5531a03964dSStephen Boyd 	return ERR_PTR(-ENOENT);
5541a03964dSStephen Boyd }
5551a03964dSStephen Boyd 
5561a03964dSStephen Boyd static void *qcom_smem_get_private(struct qcom_smem *smem,
55720bb6c9dSDeepak Kumar Singh 				   struct smem_partition *part,
5584b638df4SBjorn Andersson 				   unsigned item,
5594b638df4SBjorn Andersson 				   size_t *size)
5604b638df4SBjorn Andersson {
5619806884dSStephen Boyd 	struct smem_private_entry *e, *end;
56220bb6c9dSDeepak Kumar Singh 	struct smem_partition_header *phdr;
563f1383348SDeepak Kumar Singh 	void *item_ptr, *p_end;
564f1383348SDeepak Kumar Singh 	u32 padding_data;
565f1383348SDeepak Kumar Singh 	u32 e_size;
56620bb6c9dSDeepak Kumar Singh 
56720bb6c9dSDeepak Kumar Singh 	phdr = (struct smem_partition_header __force *)part->virt_base;
568f1383348SDeepak Kumar Singh 	p_end = (void *)phdr + part->size;
569c7c1dc35SBjorn Andersson 
57001f14154SBjorn Andersson 	e = phdr_to_first_uncached_entry(phdr);
57101f14154SBjorn Andersson 	end = phdr_to_last_uncached_entry(phdr);
5724b638df4SBjorn Andersson 
5739806884dSStephen Boyd 	while (e < end) {
574c7c1dc35SBjorn Andersson 		if (e->canary != SMEM_PRIVATE_CANARY)
575c7c1dc35SBjorn Andersson 			goto invalid_canary;
5764b638df4SBjorn Andersson 
5779806884dSStephen Boyd 		if (le16_to_cpu(e->item) == item) {
578f1383348SDeepak Kumar Singh 			if (size != NULL) {
579f1383348SDeepak Kumar Singh 				e_size = le32_to_cpu(e->size);
580f1383348SDeepak Kumar Singh 				padding_data = le16_to_cpu(e->padding_data);
5814b638df4SBjorn Andersson 
582f1383348SDeepak Kumar Singh 				if (WARN_ON(e_size > part->size || padding_data > e_size))
583f1383348SDeepak Kumar Singh 					return ERR_PTR(-EINVAL);
584f1383348SDeepak Kumar Singh 
585f1383348SDeepak Kumar Singh 				*size = e_size - padding_data;
586f1383348SDeepak Kumar Singh 			}
587f1383348SDeepak Kumar Singh 
588f1383348SDeepak Kumar Singh 			item_ptr = uncached_entry_to_item(e);
589f1383348SDeepak Kumar Singh 			if (WARN_ON(item_ptr > p_end))
590f1383348SDeepak Kumar Singh 				return ERR_PTR(-EINVAL);
591f1383348SDeepak Kumar Singh 
592f1383348SDeepak Kumar Singh 			return item_ptr;
5934b638df4SBjorn Andersson 		}
5944b638df4SBjorn Andersson 
59501f14154SBjorn Andersson 		e = uncached_entry_next(e);
5964b638df4SBjorn Andersson 	}
5974b638df4SBjorn Andersson 
598f1383348SDeepak Kumar Singh 	if (WARN_ON((void *)e > p_end))
599f1383348SDeepak Kumar Singh 		return ERR_PTR(-EINVAL);
600f1383348SDeepak Kumar Singh 
601c7c1dc35SBjorn Andersson 	/* Item was not found in the uncached list, search the cached list */
602c7c1dc35SBjorn Andersson 
60320bb6c9dSDeepak Kumar Singh 	e = phdr_to_first_cached_entry(phdr, part->cacheline);
604c7c1dc35SBjorn Andersson 	end = phdr_to_last_cached_entry(phdr);
605c7c1dc35SBjorn Andersson 
606f1383348SDeepak Kumar Singh 	if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
607f1383348SDeepak Kumar Singh 		return ERR_PTR(-EINVAL);
608f1383348SDeepak Kumar Singh 
609c7c1dc35SBjorn Andersson 	while (e > end) {
610c7c1dc35SBjorn Andersson 		if (e->canary != SMEM_PRIVATE_CANARY)
611c7c1dc35SBjorn Andersson 			goto invalid_canary;
612c7c1dc35SBjorn Andersson 
613c7c1dc35SBjorn Andersson 		if (le16_to_cpu(e->item) == item) {
614f1383348SDeepak Kumar Singh 			if (size != NULL) {
615f1383348SDeepak Kumar Singh 				e_size = le32_to_cpu(e->size);
616f1383348SDeepak Kumar Singh 				padding_data = le16_to_cpu(e->padding_data);
617c7c1dc35SBjorn Andersson 
618f1383348SDeepak Kumar Singh 				if (WARN_ON(e_size > part->size || padding_data > e_size))
619f1383348SDeepak Kumar Singh 					return ERR_PTR(-EINVAL);
620f1383348SDeepak Kumar Singh 
621f1383348SDeepak Kumar Singh 				*size = e_size - padding_data;
622f1383348SDeepak Kumar Singh 			}
623f1383348SDeepak Kumar Singh 
624f1383348SDeepak Kumar Singh 			item_ptr = cached_entry_to_item(e);
625f1383348SDeepak Kumar Singh 			if (WARN_ON(item_ptr < (void *)phdr))
626f1383348SDeepak Kumar Singh 				return ERR_PTR(-EINVAL);
627f1383348SDeepak Kumar Singh 
628f1383348SDeepak Kumar Singh 			return item_ptr;
629c7c1dc35SBjorn Andersson 		}
630c7c1dc35SBjorn Andersson 
63120bb6c9dSDeepak Kumar Singh 		e = cached_entry_next(e, part->cacheline);
632c7c1dc35SBjorn Andersson 	}
633c7c1dc35SBjorn Andersson 
634f1383348SDeepak Kumar Singh 	if (WARN_ON((void *)e < (void *)phdr))
635f1383348SDeepak Kumar Singh 		return ERR_PTR(-EINVAL);
636f1383348SDeepak Kumar Singh 
6371a03964dSStephen Boyd 	return ERR_PTR(-ENOENT);
638c7c1dc35SBjorn Andersson 
639c7c1dc35SBjorn Andersson invalid_canary:
64004a512feSAlex Elder 	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
64104a512feSAlex Elder 			le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
642c7c1dc35SBjorn Andersson 
643c7c1dc35SBjorn Andersson 	return ERR_PTR(-EINVAL);
6444b638df4SBjorn Andersson }
6454b638df4SBjorn Andersson 
6464b638df4SBjorn Andersson /**
6474b638df4SBjorn Andersson  * qcom_smem_get() - resolve ptr and size of a smem item
6484b638df4SBjorn Andersson  * @host:	the remote processor, or -1
6494b638df4SBjorn Andersson  * @item:	smem item handle
6504b638df4SBjorn Andersson  * @size:	pointer to be filled out with size of the item
6514b638df4SBjorn Andersson  *
6521a03964dSStephen Boyd  * Looks up smem item and returns pointer to it. Size of smem
6531a03964dSStephen Boyd  * item is returned in @size.
6544b638df4SBjorn Andersson  */
6551a03964dSStephen Boyd void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
6564b638df4SBjorn Andersson {
65720bb6c9dSDeepak Kumar Singh 	struct smem_partition *part;
6584b638df4SBjorn Andersson 	unsigned long flags;
6594b638df4SBjorn Andersson 	int ret;
6601a03964dSStephen Boyd 	void *ptr = ERR_PTR(-EPROBE_DEFER);
6614b638df4SBjorn Andersson 
6624b638df4SBjorn Andersson 	if (!__smem)
6631a03964dSStephen Boyd 		return ptr;
6644b638df4SBjorn Andersson 
6655b394067SChris Lew 	if (WARN_ON(item >= __smem->item_count))
6665b394067SChris Lew 		return ERR_PTR(-EINVAL);
6675b394067SChris Lew 
6684b638df4SBjorn Andersson 	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
6694b638df4SBjorn Andersson 					  HWSPINLOCK_TIMEOUT,
6704b638df4SBjorn Andersson 					  &flags);
6714b638df4SBjorn Andersson 	if (ret)
6721a03964dSStephen Boyd 		return ERR_PTR(ret);
6734b638df4SBjorn Andersson 
67420bb6c9dSDeepak Kumar Singh 	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
67520bb6c9dSDeepak Kumar Singh 		part = &__smem->partitions[host];
67620bb6c9dSDeepak Kumar Singh 		ptr = qcom_smem_get_private(__smem, part, item, size);
67720bb6c9dSDeepak Kumar Singh 	} else if (__smem->global_partition.virt_base) {
67820bb6c9dSDeepak Kumar Singh 		part = &__smem->global_partition;
67920bb6c9dSDeepak Kumar Singh 		ptr = qcom_smem_get_private(__smem, part, item, size);
680d52e4048SChris Lew 	} else {
6811a03964dSStephen Boyd 		ptr = qcom_smem_get_global(__smem, item, size);
682d52e4048SChris Lew 	}
6834b638df4SBjorn Andersson 
6844b638df4SBjorn Andersson 	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
6851a03964dSStephen Boyd 
6861a03964dSStephen Boyd 	return ptr;
6874b638df4SBjorn Andersson 
6884b638df4SBjorn Andersson }
68910615007SRobert Marko EXPORT_SYMBOL_GPL(qcom_smem_get);
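
/*
 * Usage sketch (illustrative, not part of this driver): looking up an item
 * and validating its size before use. MY_SMEM_ITEM and struct my_state are
 * made-up names for the example:
 *
 *	size_t size;
 *	struct my_state *state;
 *
 *	state = qcom_smem_get(QCOM_SMEM_HOST_ANY, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(state))
 *		return PTR_ERR(state);
 *	if (size < sizeof(*state))
 *		return -EINVAL;
 */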
6904b638df4SBjorn Andersson 
6914b638df4SBjorn Andersson /**
6924b638df4SBjorn Andersson  * qcom_smem_get_free_space() - retrieve amount of free space in a partition
6934b638df4SBjorn Andersson  * @host:	the remote processor identifying a partition, or -1
6944b638df4SBjorn Andersson  *
6954b638df4SBjorn Andersson  * To be used by smem clients as a quick way to determine if any new
6964b638df4SBjorn Andersson  * allocations have been made.
6974b638df4SBjorn Andersson  */
6984b638df4SBjorn Andersson int qcom_smem_get_free_space(unsigned host)
6994b638df4SBjorn Andersson {
70020bb6c9dSDeepak Kumar Singh 	struct smem_partition *part;
7014b638df4SBjorn Andersson 	struct smem_partition_header *phdr;
7024b638df4SBjorn Andersson 	struct smem_header *header;
7034b638df4SBjorn Andersson 	unsigned ret;
7044b638df4SBjorn Andersson 
7054b638df4SBjorn Andersson 	if (!__smem)
7064b638df4SBjorn Andersson 		return -EPROBE_DEFER;
7074b638df4SBjorn Andersson 
70820bb6c9dSDeepak Kumar Singh 	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
70920bb6c9dSDeepak Kumar Singh 		part = &__smem->partitions[host];
71020bb6c9dSDeepak Kumar Singh 		phdr = part->virt_base;
7119806884dSStephen Boyd 		ret = le32_to_cpu(phdr->offset_free_cached) -
7129806884dSStephen Boyd 		      le32_to_cpu(phdr->offset_free_uncached);
713f1383348SDeepak Kumar Singh 
714f1383348SDeepak Kumar Singh 		if (ret > part->size)
715f1383348SDeepak Kumar Singh 			return -EINVAL;
71620bb6c9dSDeepak Kumar Singh 	} else if (__smem->global_partition.virt_base) {
71720bb6c9dSDeepak Kumar Singh 		part = &__smem->global_partition;
71820bb6c9dSDeepak Kumar Singh 		phdr = part->virt_base;
719d52e4048SChris Lew 		ret = le32_to_cpu(phdr->offset_free_cached) -
720d52e4048SChris Lew 		      le32_to_cpu(phdr->offset_free_uncached);
721f1383348SDeepak Kumar Singh 
722f1383348SDeepak Kumar Singh 		if (ret > part->size)
723f1383348SDeepak Kumar Singh 			return -EINVAL;
7244b638df4SBjorn Andersson 	} else {
7254b638df4SBjorn Andersson 		header = __smem->regions[0].virt_base;
7269806884dSStephen Boyd 		ret = le32_to_cpu(header->available);
727f1383348SDeepak Kumar Singh 
728f1383348SDeepak Kumar Singh 		if (ret > __smem->regions[0].size)
729f1383348SDeepak Kumar Singh 			return -EINVAL;
7304b638df4SBjorn Andersson 	}
7314b638df4SBjorn Andersson 
7324b638df4SBjorn Andersson 	return ret;
7334b638df4SBjorn Andersson }
73410615007SRobert Marko EXPORT_SYMBOL_GPL(qcom_smem_get_free_space);
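
/*
 * Usage sketch (illustrative, not part of this driver): a client can cache
 * the last value it saw and only re-scan its partition for new items when
 * the amount of free space changes. remote_pid, last_free and
 * scan_for_new_items() are placeholders:
 *
 *	free = qcom_smem_get_free_space(remote_pid);
 *	if (free < 0)
 *		return free;
 *	if (free != last_free) {
 *		last_free = free;
 *		scan_for_new_items();
 *	}
 */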
7354b638df4SBjorn Andersson 
73620bb6c9dSDeepak Kumar Singh static bool addr_in_range(void __iomem *base, size_t size, void *addr)
73720bb6c9dSDeepak Kumar Singh {
73820bb6c9dSDeepak Kumar Singh 	return base && (addr >= base && addr < base + size);
73920bb6c9dSDeepak Kumar Singh }
74020bb6c9dSDeepak Kumar Singh 
7416d361c1dSAlex Elder /**
7426d361c1dSAlex Elder  * qcom_smem_virt_to_phys() - return the physical address associated
7436d361c1dSAlex Elder  * with an smem item pointer (previously returned by qcom_smem_get())
7446d361c1dSAlex Elder  * @p:	the virtual address to convert
7456d361c1dSAlex Elder  *
7466d361c1dSAlex Elder  * Returns 0 if the pointer provided is not within any smem region.
7476d361c1dSAlex Elder  */
7486d361c1dSAlex Elder phys_addr_t qcom_smem_virt_to_phys(void *p)
7496d361c1dSAlex Elder {
75020bb6c9dSDeepak Kumar Singh 	struct smem_partition *part;
75120bb6c9dSDeepak Kumar Singh 	struct smem_region *area;
75220bb6c9dSDeepak Kumar Singh 	u64 offset;
75320bb6c9dSDeepak Kumar Singh 	u32 i;
75420bb6c9dSDeepak Kumar Singh 
75520bb6c9dSDeepak Kumar Singh 	for (i = 0; i < SMEM_HOST_COUNT; i++) {
75620bb6c9dSDeepak Kumar Singh 		part = &__smem->partitions[i];
75720bb6c9dSDeepak Kumar Singh 
75820bb6c9dSDeepak Kumar Singh 		if (addr_in_range(part->virt_base, part->size, p)) {
75920bb6c9dSDeepak Kumar Singh 			offset = p - part->virt_base;
76020bb6c9dSDeepak Kumar Singh 
76120bb6c9dSDeepak Kumar Singh 			return (phys_addr_t)part->phys_base + offset;
76220bb6c9dSDeepak Kumar Singh 		}
76320bb6c9dSDeepak Kumar Singh 	}
76420bb6c9dSDeepak Kumar Singh 
76520bb6c9dSDeepak Kumar Singh 	part = &__smem->global_partition;
76620bb6c9dSDeepak Kumar Singh 
76720bb6c9dSDeepak Kumar Singh 	if (addr_in_range(part->virt_base, part->size, p)) {
76820bb6c9dSDeepak Kumar Singh 		offset = p - part->virt_base;
76920bb6c9dSDeepak Kumar Singh 
77020bb6c9dSDeepak Kumar Singh 		return (phys_addr_t)part->phys_base + offset;
77120bb6c9dSDeepak Kumar Singh 	}
7726d361c1dSAlex Elder 
7736d361c1dSAlex Elder 	for (i = 0; i < __smem->num_regions; i++) {
77420bb6c9dSDeepak Kumar Singh 		area = &__smem->regions[i];
7756d361c1dSAlex Elder 
77620bb6c9dSDeepak Kumar Singh 		if (addr_in_range(area->virt_base, area->size, p)) {
77720bb6c9dSDeepak Kumar Singh 			offset = p - area->virt_base;
7786d361c1dSAlex Elder 
77920bb6c9dSDeepak Kumar Singh 			return (phys_addr_t)area->aux_base + offset;
7806d361c1dSAlex Elder 		}
7816d361c1dSAlex Elder 	}
7826d361c1dSAlex Elder 
7836d361c1dSAlex Elder 	return 0;
7846d361c1dSAlex Elder }
78510615007SRobert Marko EXPORT_SYMBOL_GPL(qcom_smem_virt_to_phys);
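
/*
 * Usage sketch (illustrative, not part of this driver): converting an item
 * pointer into a physical address that can be handed to a remote processor.
 * MY_SMEM_ITEM is a made-up item number:
 *
 *	buf = qcom_smem_get(remote_pid, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	phys = qcom_smem_virt_to_phys(buf);
 *	if (!phys)
 *		return -EINVAL;
 */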
7866d361c1dSAlex Elder 
78717051d2cSRobert Marko /**
78817051d2cSRobert Marko  * qcom_smem_get_soc_id() - return the SoC ID
78917051d2cSRobert Marko  * @id:	On success, we return the SoC ID here.
79017051d2cSRobert Marko  *
79117051d2cSRobert Marko  * Look up SoC ID from HW/SW build ID and return it.
79217051d2cSRobert Marko  *
79317051d2cSRobert Marko  * Return: 0 on success, negative errno on failure.
79417051d2cSRobert Marko  */
79517051d2cSRobert Marko int qcom_smem_get_soc_id(u32 *id)
79617051d2cSRobert Marko {
79717051d2cSRobert Marko 	struct socinfo *info;
79817051d2cSRobert Marko 
79917051d2cSRobert Marko 	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL);
80017051d2cSRobert Marko 	if (IS_ERR(info))
80117051d2cSRobert Marko 		return PTR_ERR(info);
80217051d2cSRobert Marko 
80317051d2cSRobert Marko 	*id = __le32_to_cpu(info->id);
80417051d2cSRobert Marko 
80517051d2cSRobert Marko 	return 0;
80617051d2cSRobert Marko }
80717051d2cSRobert Marko EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id);
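
/*
 * Usage sketch (illustrative, not part of this driver): keying driver
 * behaviour off the SoC ID, using one of the QCOM_ID_* constants from
 * dt-bindings/arm/qcom,ids.h; apply_msm8996_quirk() is a placeholder:
 *
 *	u32 soc_id;
 *	int ret;
 *
 *	ret = qcom_smem_get_soc_id(&soc_id);
 *	if (ret)
 *		return ret;
 *	if (soc_id == QCOM_ID_MSM8996)
 *		apply_msm8996_quirk();
 */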
80817051d2cSRobert Marko 
8094b638df4SBjorn Andersson static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
8104b638df4SBjorn Andersson {
811dcc0967dSChris Lew 	struct smem_header *header;
8129806884dSStephen Boyd 	__le32 *versions;
8134b638df4SBjorn Andersson 
814dcc0967dSChris Lew 	header = smem->regions[0].virt_base;
815dcc0967dSChris Lew 	versions = header->version;
8164b638df4SBjorn Andersson 
8179806884dSStephen Boyd 	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
8184b638df4SBjorn Andersson }
8194b638df4SBjorn Andersson 
820d52e4048SChris Lew static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
8214b638df4SBjorn Andersson {
8224b638df4SBjorn Andersson 	struct smem_ptable *ptable;
823d52e4048SChris Lew 	u32 version;
8244b638df4SBjorn Andersson 
82520bb6c9dSDeepak Kumar Singh 	ptable = smem->ptable;
8269806884dSStephen Boyd 	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
827d52e4048SChris Lew 		return ERR_PTR(-ENOENT);
8284b638df4SBjorn Andersson 
8299806884dSStephen Boyd 	version = le32_to_cpu(ptable->version);
8309806884dSStephen Boyd 	if (version != 1) {
8314b638df4SBjorn Andersson 		dev_err(smem->dev,
8329806884dSStephen Boyd 			"Unsupported partition header version %d\n", version);
833d52e4048SChris Lew 		return ERR_PTR(-EINVAL);
834d52e4048SChris Lew 	}
835d52e4048SChris Lew 	return ptable;
836d52e4048SChris Lew }
837d52e4048SChris Lew 
8385b394067SChris Lew static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
8395b394067SChris Lew {
8405b394067SChris Lew 	struct smem_ptable *ptable;
8415b394067SChris Lew 	struct smem_info *info;
8425b394067SChris Lew 
8435b394067SChris Lew 	ptable = qcom_smem_get_ptable(smem);
8445b394067SChris Lew 	if (IS_ERR_OR_NULL(ptable))
8455b394067SChris Lew 		return SMEM_ITEM_COUNT;
8465b394067SChris Lew 
8475b394067SChris Lew 	info = (struct smem_info *)&ptable->entry[ptable->num_entries];
8485b394067SChris Lew 	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
8495b394067SChris Lew 		return SMEM_ITEM_COUNT;
8505b394067SChris Lew 
8515b394067SChris Lew 	return le16_to_cpu(info->num_items);
8525b394067SChris Lew }
8535b394067SChris Lew 
854ada79289SAlex Elder /*
855ada79289SAlex Elder  * Validate the partition header for a partition whose partition
856ada79289SAlex Elder  * table entry is supplied.  Returns a pointer to its header if
857ada79289SAlex Elder  * valid, or a null pointer otherwise.
858ada79289SAlex Elder  */
859ada79289SAlex Elder static struct smem_partition_header *
860ada79289SAlex Elder qcom_smem_partition_header(struct qcom_smem *smem,
8617d019344SAlex Elder 		struct smem_ptable_entry *entry, u16 host0, u16 host1)
862ada79289SAlex Elder {
863ada79289SAlex Elder 	struct smem_partition_header *header;
86420bb6c9dSDeepak Kumar Singh 	u32 phys_addr;
865190b216cSAlex Elder 	u32 size;
866ada79289SAlex Elder 
86720bb6c9dSDeepak Kumar Singh 	phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
86820bb6c9dSDeepak Kumar Singh 	header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));
86920bb6c9dSDeepak Kumar Singh 
87020bb6c9dSDeepak Kumar Singh 	if (!header)
87120bb6c9dSDeepak Kumar Singh 		return NULL;
872ada79289SAlex Elder 
873ada79289SAlex Elder 	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
8741b3df368SAndy Shevchenko 		dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
875ada79289SAlex Elder 		return NULL;
876ada79289SAlex Elder 	}
877ada79289SAlex Elder 
8787d019344SAlex Elder 	if (host0 != le16_to_cpu(header->host0)) {
8797d019344SAlex Elder 		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
8807d019344SAlex Elder 				host0, le16_to_cpu(header->host0));
8817d019344SAlex Elder 		return NULL;
8827d019344SAlex Elder 	}
8837d019344SAlex Elder 	if (host1 != le16_to_cpu(header->host1)) {
8847d019344SAlex Elder 		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
8857d019344SAlex Elder 				host1, le16_to_cpu(header->host1));
8867d019344SAlex Elder 		return NULL;
8877d019344SAlex Elder 	}
8887d019344SAlex Elder 
889190b216cSAlex Elder 	size = le32_to_cpu(header->size);
890190b216cSAlex Elder 	if (size != le32_to_cpu(entry->size)) {
891190b216cSAlex Elder 		dev_err(smem->dev, "bad partition size (%u != %u)\n",
892190b216cSAlex Elder 			size, le32_to_cpu(entry->size));
893190b216cSAlex Elder 		return NULL;
894190b216cSAlex Elder 	}
895190b216cSAlex Elder 
896380dc4afSAlex Elder 	if (le32_to_cpu(header->offset_free_uncached) > size) {
897380dc4afSAlex Elder 		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
898380dc4afSAlex Elder 			le32_to_cpu(header->offset_free_uncached), size);
899380dc4afSAlex Elder 		return NULL;
900380dc4afSAlex Elder 	}
901380dc4afSAlex Elder 
902ada79289SAlex Elder 	return header;
903ada79289SAlex Elder }
904ada79289SAlex Elder 
905d52e4048SChris Lew static int qcom_smem_set_global_partition(struct qcom_smem *smem)
906d52e4048SChris Lew {
907d52e4048SChris Lew 	struct smem_partition_header *header;
9088fa1a214SAlex Elder 	struct smem_ptable_entry *entry;
909d52e4048SChris Lew 	struct smem_ptable *ptable;
9108fa1a214SAlex Elder 	bool found = false;
911d52e4048SChris Lew 	int i;
912d52e4048SChris Lew 
91320bb6c9dSDeepak Kumar Singh 	if (smem->global_partition.virt_base) {
9140b65c59eSBjorn Andersson 		dev_err(smem->dev, "Already found the global partition\n");
9150b65c59eSBjorn Andersson 		return -EINVAL;
9160b65c59eSBjorn Andersson 	}
9170b65c59eSBjorn Andersson 
918d52e4048SChris Lew 	ptable = qcom_smem_get_ptable(smem);
919d52e4048SChris Lew 	if (IS_ERR(ptable))
920d52e4048SChris Lew 		return PTR_ERR(ptable);
921d52e4048SChris Lew 
922d52e4048SChris Lew 	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
923d52e4048SChris Lew 		entry = &ptable->entry[i];
924eba75702SAlex Elder 		if (!le32_to_cpu(entry->offset))
925eba75702SAlex Elder 			continue;
926eba75702SAlex Elder 		if (!le32_to_cpu(entry->size))
927eba75702SAlex Elder 			continue;
928eba75702SAlex Elder 
92933fdbc4eSAlex Elder 		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
93033fdbc4eSAlex Elder 			continue;
93133fdbc4eSAlex Elder 
93233fdbc4eSAlex Elder 		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
9338fa1a214SAlex Elder 			found = true;
934d52e4048SChris Lew 			break;
935d52e4048SChris Lew 		}
9368fa1a214SAlex Elder 	}
937d52e4048SChris Lew 
9388fa1a214SAlex Elder 	if (!found) {
939d52e4048SChris Lew 		dev_err(smem->dev, "Missing entry for global partition\n");
9404b638df4SBjorn Andersson 		return -EINVAL;
9414b638df4SBjorn Andersson 	}
9424b638df4SBjorn Andersson 
9437d019344SAlex Elder 	header = qcom_smem_partition_header(smem, entry,
9447d019344SAlex Elder 				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
945ada79289SAlex Elder 	if (!header)
946ada79289SAlex Elder 		return -EINVAL;
947ada79289SAlex Elder 
94820bb6c9dSDeepak Kumar Singh 	smem->global_partition.virt_base = (void __iomem *)header;
94920bb6c9dSDeepak Kumar Singh 	smem->global_partition.phys_base = smem->regions[0].aux_base +
95020bb6c9dSDeepak Kumar Singh 								le32_to_cpu(entry->offset);
95120bb6c9dSDeepak Kumar Singh 	smem->global_partition.size = le32_to_cpu(entry->size);
95220bb6c9dSDeepak Kumar Singh 	smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);
953d52e4048SChris Lew 
954d52e4048SChris Lew 	return 0;
955d52e4048SChris Lew }
956d52e4048SChris Lew 
95713a920aeSAlex Elder static int
95813a920aeSAlex Elder qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
959d52e4048SChris Lew {
960d52e4048SChris Lew 	struct smem_partition_header *header;
961d52e4048SChris Lew 	struct smem_ptable_entry *entry;
962d52e4048SChris Lew 	struct smem_ptable *ptable;
9630ec29ccfSBill Wendling 	u16 remote_host;
96413a920aeSAlex Elder 	u16 host0, host1;
965d52e4048SChris Lew 	int i;
966d52e4048SChris Lew 
967d52e4048SChris Lew 	ptable = qcom_smem_get_ptable(smem);
968d52e4048SChris Lew 	if (IS_ERR(ptable))
969d52e4048SChris Lew 		return PTR_ERR(ptable);
970d52e4048SChris Lew 
9719806884dSStephen Boyd 	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
9724b638df4SBjorn Andersson 		entry = &ptable->entry[i];
9739806884dSStephen Boyd 		if (!le32_to_cpu(entry->offset))
9744b638df4SBjorn Andersson 			continue;
9759806884dSStephen Boyd 		if (!le32_to_cpu(entry->size))
9764b638df4SBjorn Andersson 			continue;
9774b638df4SBjorn Andersson 
978eba75702SAlex Elder 		host0 = le16_to_cpu(entry->host0);
979eba75702SAlex Elder 		host1 = le16_to_cpu(entry->host1);
9809806884dSStephen Boyd 		if (host0 == local_host)
9819806884dSStephen Boyd 			remote_host = host1;
982eb68cf09SAlex Elder 		else if (host1 == local_host)
9839806884dSStephen Boyd 			remote_host = host0;
984eb68cf09SAlex Elder 		else
985eb68cf09SAlex Elder 			continue;
9864b638df4SBjorn Andersson 
9874b638df4SBjorn Andersson 		if (remote_host >= SMEM_HOST_COUNT) {
9880ec29ccfSBill Wendling 			dev_err(smem->dev, "bad host %u\n", remote_host);
9894b638df4SBjorn Andersson 			return -EINVAL;
9904b638df4SBjorn Andersson 		}
9914b638df4SBjorn Andersson 
99220bb6c9dSDeepak Kumar Singh 		if (smem->partitions[remote_host].virt_base) {
9930ec29ccfSBill Wendling 			dev_err(smem->dev, "duplicate host %u\n", remote_host);
9944b638df4SBjorn Andersson 			return -EINVAL;
9954b638df4SBjorn Andersson 		}
9964b638df4SBjorn Andersson 
9977d019344SAlex Elder 		header = qcom_smem_partition_header(smem, entry, host0, host1);
998ada79289SAlex Elder 		if (!header)
999ada79289SAlex Elder 			return -EINVAL;
1000ada79289SAlex Elder 
100120bb6c9dSDeepak Kumar Singh 		smem->partitions[remote_host].virt_base = (void __iomem *)header;
100220bb6c9dSDeepak Kumar Singh 		smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
100320bb6c9dSDeepak Kumar Singh 										le32_to_cpu(entry->offset);
100420bb6c9dSDeepak Kumar Singh 		smem->partitions[remote_host].size = le32_to_cpu(entry->size);
100520bb6c9dSDeepak Kumar Singh 		smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
10064b638df4SBjorn Andersson 	}
10074b638df4SBjorn Andersson 
10084b638df4SBjorn Andersson 	return 0;
10094b638df4SBjorn Andersson }
10104b638df4SBjorn Andersson 
101120bb6c9dSDeepak Kumar Singh static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
101220bb6c9dSDeepak Kumar Singh {
101320bb6c9dSDeepak Kumar Singh 	u32 ptable_start;
101420bb6c9dSDeepak Kumar Singh 
101520bb6c9dSDeepak Kumar Singh 	/* map starting 4K for smem header */
101620bb6c9dSDeepak Kumar Singh 	region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
101720bb6c9dSDeepak Kumar Singh 	ptable_start = region->aux_base + region->size - SZ_4K;
101820bb6c9dSDeepak Kumar Singh 	/* map last 4k for toc */
101920bb6c9dSDeepak Kumar Singh 	smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);
102020bb6c9dSDeepak Kumar Singh 
102120bb6c9dSDeepak Kumar Singh 	if (!region->virt_base || !smem->ptable)
102220bb6c9dSDeepak Kumar Singh 		return -ENOMEM;
102320bb6c9dSDeepak Kumar Singh 
102420bb6c9dSDeepak Kumar Singh 	return 0;
102520bb6c9dSDeepak Kumar Singh }
102620bb6c9dSDeepak Kumar Singh 
102720bb6c9dSDeepak Kumar Singh static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
102820bb6c9dSDeepak Kumar Singh {
102920bb6c9dSDeepak Kumar Singh 	u32 phys_addr;
103020bb6c9dSDeepak Kumar Singh 
103120bb6c9dSDeepak Kumar Singh 	phys_addr = smem->regions[0].aux_base;
103220bb6c9dSDeepak Kumar Singh 
103320bb6c9dSDeepak Kumar Singh 	smem->regions[0].size = size;
103420bb6c9dSDeepak Kumar Singh 	smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);
103520bb6c9dSDeepak Kumar Singh 
103620bb6c9dSDeepak Kumar Singh 	if (!smem->regions[0].virt_base)
103720bb6c9dSDeepak Kumar Singh 		return -ENOMEM;
103820bb6c9dSDeepak Kumar Singh 
103920bb6c9dSDeepak Kumar Singh 	return 0;
104020bb6c9dSDeepak Kumar Singh }
104120bb6c9dSDeepak Kumar Singh 
1042b5af64fcSBjorn Andersson static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
1043b5af64fcSBjorn Andersson 				 struct smem_region *region)
10444b638df4SBjorn Andersson {
1045b5af64fcSBjorn Andersson 	struct device *dev = smem->dev;
1046d0bfd7c9SStephen Boyd 	struct device_node *np;
1047d0bfd7c9SStephen Boyd 	struct resource r;
1048d0bfd7c9SStephen Boyd 	int ret;
10494b638df4SBjorn Andersson 
1050d0bfd7c9SStephen Boyd 	np = of_parse_phandle(dev->of_node, name, 0);
1051d0bfd7c9SStephen Boyd 	if (!np) {
1052d0bfd7c9SStephen Boyd 		dev_err(dev, "No %s specified\n", name);
1053d0bfd7c9SStephen Boyd 		return -EINVAL;
10544b638df4SBjorn Andersson 	}
10554b638df4SBjorn Andersson 
1056d0bfd7c9SStephen Boyd 	ret = of_address_to_resource(np, 0, &r);
1057d0bfd7c9SStephen Boyd 	of_node_put(np);
1058d0bfd7c9SStephen Boyd 	if (ret)
1059d0bfd7c9SStephen Boyd 		return ret;
1060d0bfd7c9SStephen Boyd 
1061b5af64fcSBjorn Andersson 	region->aux_base = r.start;
1062b5af64fcSBjorn Andersson 	region->size = resource_size(&r);
1063d0bfd7c9SStephen Boyd 
1064d0bfd7c9SStephen Boyd 	return 0;
10654b638df4SBjorn Andersson }
10664b638df4SBjorn Andersson 
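/*
 * Locate the shared memory region(s), validate the SMEM header written by the
 * boot loader (SBL), pick the layout scheme from the version word and
 * enumerate the partitions owned by the application processor.
 */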
10674b638df4SBjorn Andersson static int qcom_smem_probe(struct platform_device *pdev)
10684b638df4SBjorn Andersson {
10694b638df4SBjorn Andersson 	struct smem_header *header;
1070b5af64fcSBjorn Andersson 	struct reserved_mem *rmem;
10714b638df4SBjorn Andersson 	struct qcom_smem *smem;
107220bb6c9dSDeepak Kumar Singh 	unsigned long flags;
1073d0bfd7c9SStephen Boyd 	int num_regions;
10744b638df4SBjorn Andersson 	int hwlock_id;
10754b638df4SBjorn Andersson 	u32 version;
107620bb6c9dSDeepak Kumar Singh 	u32 size;
10774b638df4SBjorn Andersson 	int ret;
1078b5af64fcSBjorn Andersson 	int i;
10794b638df4SBjorn Andersson 
1080d0bfd7c9SStephen Boyd 	num_regions = 1;
10814a1b9f4eSRob Herring 	if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram"))
1082d0bfd7c9SStephen Boyd 		num_regions++;
10834b638df4SBjorn Andersson 
1084cbdd13bfSChristophe JAILLET 	smem = devm_kzalloc(&pdev->dev, struct_size(smem, regions, num_regions),
1085cbdd13bfSChristophe JAILLET 			    GFP_KERNEL);
10864b638df4SBjorn Andersson 	if (!smem)
10874b638df4SBjorn Andersson 		return -ENOMEM;
10884b638df4SBjorn Andersson 
10894b638df4SBjorn Andersson 	smem->dev = &pdev->dev;
10904b638df4SBjorn Andersson 	smem->num_regions = num_regions;
10914b638df4SBjorn Andersson 
1092b5af64fcSBjorn Andersson 	rmem = of_reserved_mem_lookup(pdev->dev.of_node);
1093b5af64fcSBjorn Andersson 	if (rmem) {
1094b5af64fcSBjorn Andersson 		smem->regions[0].aux_base = rmem->base;
1095b5af64fcSBjorn Andersson 		smem->regions[0].size = rmem->size;
1096b5af64fcSBjorn Andersson 	} else {
1097b5af64fcSBjorn Andersson 		/*
1098b5af64fcSBjorn Andersson 		 * Fall back to the memory-region reference, if we're not a
1099b5af64fcSBjorn Andersson 		 * reserved-memory node.
1100b5af64fcSBjorn Andersson 		 */
1101b5af64fcSBjorn Andersson 		ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
11024b638df4SBjorn Andersson 		if (ret)
11034b638df4SBjorn Andersson 			return ret;
1104b5af64fcSBjorn Andersson 	}
11054b638df4SBjorn Andersson 
1106b5af64fcSBjorn Andersson 	if (num_regions > 1) {
1107b5af64fcSBjorn Andersson 		ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
1108b5af64fcSBjorn Andersson 		if (ret)
1109d0bfd7c9SStephen Boyd 			return ret;
1110b5af64fcSBjorn Andersson 	}
1111b5af64fcSBjorn Andersson 
111320bb6c9dSDeepak Kumar Singh 	ret = qcom_smem_map_toc(smem, &smem->regions[0]);
111420bb6c9dSDeepak Kumar Singh 	if (ret)
111520bb6c9dSDeepak Kumar Singh 		return ret;
111620bb6c9dSDeepak Kumar Singh 
111720bb6c9dSDeepak Kumar Singh 	for (i = 1; i < num_regions; i++) {
1118b5af64fcSBjorn Andersson 		smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
1119b5af64fcSBjorn Andersson 							     smem->regions[i].aux_base,
1120b5af64fcSBjorn Andersson 							     smem->regions[i].size);
1121b5af64fcSBjorn Andersson 		if (!smem->regions[i].virt_base) {
1122b5af64fcSBjorn Andersson 			dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
1123b5af64fcSBjorn Andersson 			return -ENOMEM;
1124b5af64fcSBjorn Andersson 		}
1125b5af64fcSBjorn Andersson 	}
11264b638df4SBjorn Andersson 
11274b638df4SBjorn Andersson 	header = smem->regions[0].virt_base;
11289806884dSStephen Boyd 	if (le32_to_cpu(header->initialized) != 1 ||
11299806884dSStephen Boyd 	    le32_to_cpu(header->reserved)) {
11304b638df4SBjorn Andersson 		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
11314b638df4SBjorn Andersson 		return -EINVAL;
11324b638df4SBjorn Andersson 	}
11334b638df4SBjorn Andersson 
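	/* The hardware spinlock serializes heap updates between the processors. */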
113420bb6c9dSDeepak Kumar Singh 	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
113520bb6c9dSDeepak Kumar Singh 	if (hwlock_id < 0) {
113620bb6c9dSDeepak Kumar Singh 		if (hwlock_id != -EPROBE_DEFER)
113720bb6c9dSDeepak Kumar Singh 			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
113820bb6c9dSDeepak Kumar Singh 		return hwlock_id;
113920bb6c9dSDeepak Kumar Singh 	}
114020bb6c9dSDeepak Kumar Singh 
114120bb6c9dSDeepak Kumar Singh 	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
114220bb6c9dSDeepak Kumar Singh 	if (!smem->hwlock)
114320bb6c9dSDeepak Kumar Singh 		return -ENXIO;
114420bb6c9dSDeepak Kumar Singh 
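	/*
	 * Sample the global heap state under the hardware spinlock so a remote
	 * allocator cannot change it mid-read; free_offset plus available gives
	 * the overall size of the memory managed by the global heap.
	 */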
114520bb6c9dSDeepak Kumar Singh 	ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
114620bb6c9dSDeepak Kumar Singh 	if (ret)
114720bb6c9dSDeepak Kumar Singh 		return ret;
114820bb6c9dSDeepak Kumar Singh 	size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
114920bb6c9dSDeepak Kumar Singh 	hwspin_unlock_irqrestore(smem->hwlock, &flags);
115020bb6c9dSDeepak Kumar Singh 
11514b638df4SBjorn Andersson 	version = qcom_smem_get_sbl_version(smem);
115220bb6c9dSDeepak Kumar Singh 	/*
115320bb6c9dSDeepak Kumar Singh 	 * The smem header mapping is only needed by the global heap version
115420bb6c9dSDeepak Kumar Singh 	 * scheme, so unmap it here. It is remapped in qcom_smem_map_global()
115520bb6c9dSDeepak Kumar Singh 	 * when the whole region is mapped again.
115620bb6c9dSDeepak Kumar Singh 	 */
115720bb6c9dSDeepak Kumar Singh 	devm_iounmap(smem->dev, smem->regions[0].virt_base);
1158d52e4048SChris Lew 	switch (version >> 16) {
1159d52e4048SChris Lew 	case SMEM_GLOBAL_PART_VERSION:
1160d52e4048SChris Lew 		ret = qcom_smem_set_global_partition(smem);
1161d52e4048SChris Lew 		if (ret < 0)
1162d52e4048SChris Lew 			return ret;
11635b394067SChris Lew 		smem->item_count = qcom_smem_get_item_count(smem);
11645b394067SChris Lew 		break;
1165d52e4048SChris Lew 	case SMEM_GLOBAL_HEAP_VERSION:
116620bb6c9dSDeepak Kumar Singh 		ret = qcom_smem_map_global(smem, size);
		if (ret < 0)
			return ret;
11675b394067SChris Lew 		smem->item_count = SMEM_ITEM_COUNT;
1168d52e4048SChris Lew 		break;
1169d52e4048SChris Lew 	default:
11704b638df4SBjorn Andersson 		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
11714b638df4SBjorn Andersson 		return -EINVAL;
11724b638df4SBjorn Andersson 	}
11734b638df4SBjorn Andersson 
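	/* The local host index must fall within the fixed-size partitions array. */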
117413a920aeSAlex Elder 	BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
11754b638df4SBjorn Andersson 	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
1176d52e4048SChris Lew 	if (ret < 0 && ret != -ENOENT)
11774b638df4SBjorn Andersson 		return ret;
11784b638df4SBjorn Andersson 
11794b638df4SBjorn Andersson 	__smem = smem;
11804b638df4SBjorn Andersson 
1181efb448d0SImran Khan 	smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
1182efb448d0SImran Khan 						      PLATFORM_DEVID_NONE, NULL,
1183efb448d0SImran Khan 						      0);
1184efb448d0SImran Khan 	if (IS_ERR(smem->socinfo))
1185efb448d0SImran Khan 		dev_dbg(&pdev->dev, "failed to register socinfo device\n");
1186efb448d0SImran Khan 
11874b638df4SBjorn Andersson 	return 0;
11884b638df4SBjorn Andersson }
11894b638df4SBjorn Andersson 
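/* Undo probe: drop the socinfo child device and release the hardware spinlock. */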
11904b638df4SBjorn Andersson static int qcom_smem_remove(struct platform_device *pdev)
11914b638df4SBjorn Andersson {
1192efb448d0SImran Khan 	platform_device_unregister(__smem->socinfo);
1193efb448d0SImran Khan 
11944b638df4SBjorn Andersson 	hwspin_lock_free(__smem->hwlock);
1195f8c67df7SStephen Boyd 	__smem = NULL;
11964b638df4SBjorn Andersson 
11974b638df4SBjorn Andersson 	return 0;
11984b638df4SBjorn Andersson }
11994b638df4SBjorn Andersson 
12004b638df4SBjorn Andersson static const struct of_device_id qcom_smem_of_match[] = {
12014b638df4SBjorn Andersson 	{ .compatible = "qcom,smem" },
12024b638df4SBjorn Andersson 	{}
12034b638df4SBjorn Andersson };
12044b638df4SBjorn Andersson MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
12054b638df4SBjorn Andersson 
12064b638df4SBjorn Andersson static struct platform_driver qcom_smem_driver = {
12074b638df4SBjorn Andersson 	.probe = qcom_smem_probe,
12084b638df4SBjorn Andersson 	.remove = qcom_smem_remove,
12094b638df4SBjorn Andersson 	.driver  = {
12104b638df4SBjorn Andersson 		.name = "qcom-smem",
12114b638df4SBjorn Andersson 		.of_match_table = qcom_smem_of_match,
12124b638df4SBjorn Andersson 		.suppress_bind_attrs = true,
12134b638df4SBjorn Andersson 	},
12144b638df4SBjorn Andersson };
12154b638df4SBjorn Andersson 
12164b638df4SBjorn Andersson static int __init qcom_smem_init(void)
12174b638df4SBjorn Andersson {
12184b638df4SBjorn Andersson 	return platform_driver_register(&qcom_smem_driver);
12194b638df4SBjorn Andersson }
12204b638df4SBjorn Andersson arch_initcall(qcom_smem_init);
12214b638df4SBjorn Andersson 
12224b638df4SBjorn Andersson static void __exit qcom_smem_exit(void)
12234b638df4SBjorn Andersson {
12244b638df4SBjorn Andersson 	platform_driver_unregister(&qcom_smem_driver);
12254b638df4SBjorn Andersson }
12264b638df4SBjorn Andersson module_exit(qcom_smem_exit)
12274b638df4SBjorn Andersson 
12284b638df4SBjorn Andersson MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
12294b638df4SBjorn Andersson MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
12304b638df4SBjorn Andersson MODULE_LICENSE("GPL v2");
1231