xref: /openbmc/linux/drivers/soc/qcom/smem.c (revision 10615007483b6938da9df290fe5bf460f6a07c60)
197fb5e8dSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
24b638df4SBjorn Andersson /*
34b638df4SBjorn Andersson  * Copyright (c) 2015, Sony Mobile Communications AB.
44b638df4SBjorn Andersson  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
54b638df4SBjorn Andersson  */
64b638df4SBjorn Andersson 
74b638df4SBjorn Andersson #include <linux/hwspinlock.h>
84b638df4SBjorn Andersson #include <linux/io.h>
94b638df4SBjorn Andersson #include <linux/module.h>
104b638df4SBjorn Andersson #include <linux/of.h>
114b638df4SBjorn Andersson #include <linux/of_address.h>
12b5af64fcSBjorn Andersson #include <linux/of_reserved_mem.h>
134b638df4SBjorn Andersson #include <linux/platform_device.h>
149487e2abSNiklas Cassel #include <linux/sizes.h>
154b638df4SBjorn Andersson #include <linux/slab.h>
164b638df4SBjorn Andersson #include <linux/soc/qcom/smem.h>
174b638df4SBjorn Andersson 
184b638df4SBjorn Andersson /*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
224b638df4SBjorn Andersson  *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
264b638df4SBjorn Andersson  *
274b638df4SBjorn Andersson  * The global header contains meta data for allocations as well as a fixed list
284b638df4SBjorn Andersson  * of 512 entries (@smem_global_entry) that can be initialized to reference
294b638df4SBjorn Andersson  * parts of the shared memory space.
304b638df4SBjorn Andersson  *
314b638df4SBjorn Andersson  *
324b638df4SBjorn Andersson  * In addition to this global heap a set of "private" heaps can be set up at
334b638df4SBjorn Andersson  * boot time with access restrictions so that only certain processor pairs can
344b638df4SBjorn Andersson  * access the data.
354b638df4SBjorn Andersson  *
364b638df4SBjorn Andersson  * These partitions are referenced from an optional partition table
374b638df4SBjorn Andersson  * (@smem_ptable), that is found 4kB from the end of the main smem region. The
384b638df4SBjorn Andersson  * partition table entries (@smem_ptable_entry) lists the involved processors
394b638df4SBjorn Andersson  * (or hosts) and their location in the main shared memory region.
404b638df4SBjorn Andersson  *
414b638df4SBjorn Andersson  * Each partition starts with a header (@smem_partition_header) that identifies
424b638df4SBjorn Andersson  * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
464b638df4SBjorn Andersson  *
474b638df4SBjorn Andersson  * Items in the non-cached region are allocated from the start of the partition
484b638df4SBjorn Andersson  * while items in the cached region are allocated from the end. The free area
49c7c1dc35SBjorn Andersson  * is hence the region between the cached and non-cached offsets. The header of
50c7c1dc35SBjorn Andersson  * cached items comes after the data.
514b638df4SBjorn Andersson  *
52d52e4048SChris Lew  * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
53d52e4048SChris Lew  * for the global heap. A new global partition is created from the global heap
54d52e4048SChris Lew  * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
55d52e4048SChris Lew  * set by the bootloader.
564b638df4SBjorn Andersson  *
574b638df4SBjorn Andersson  * To synchronize allocations in the shared memory heaps a remote spinlock must
584b638df4SBjorn Andersson  * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
594b638df4SBjorn Andersson  * platforms.
604b638df4SBjorn Andersson  *
614b638df4SBjorn Andersson  */
624b638df4SBjorn Andersson 
/*
 * The version member of the smem header contains an array of versions for the
 * various software components in the SoC. We verify that the boot loader
 * version is a valid version as a sanity check.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX	7	/* boot loader entry in smem_header.version[] */
#define SMEM_GLOBAL_HEAP_VERSION	11
#define SMEM_GLOBAL_PART_VERSION	12

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST	0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		20
894b638df4SBjorn Andersson 
/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 *
 * All fields are little-endian as the structure is shared with remote
 * processors.
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};
1014b638df4SBjorn Andersson 
/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
/* Strips the reserved bits 1:0 from @aux_base */
#define AUX_BASE_MASK		0xfffffffc
1174b638df4SBjorn Andersson 
/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 *
 * All fields are little-endian; this layout lives in shared memory and is
 * read by remote processors as well.
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};
1374b638df4SBjorn Andersson 
/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @cacheline:	alignment for "cached" entries
 * @reserved:	reserved entries for later use
 *
 * All fields are little-endian; the table is populated by the bootloader
 * side - NOTE(review): exact producer not visible in this file, confirm.
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};
1574b638df4SBjorn Andersson 
/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	for now reserved entries
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 *
 * Found 4kB from the end of the main smem region (see the file header
 * comment).
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
1754b638df4SBjorn Andersson 
/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC ("$PRT")
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};
1979806884dSStephen Boyd 
/**
 * struct smem_partition - describes smem partition
 * @virt_base:	starting virtual address of partition
 * @phys_base:	starting physical address of partition
 * @cacheline:	alignment for "cached" entries
 * @size:	size of partition
 */
struct smem_partition {
	void __iomem *virt_base;
	phys_addr_t phys_base;
	size_t cacheline;
	size_t size;
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */
2134b638df4SBjorn Andersson 
/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	for now reserved entry
 *
 * For uncached items the data follows this header; for cached items the
 * header is placed after the data (see cached_entry_to_item()).
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5
2324b638df4SBjorn Andersson 
/**
 * struct smem_info - smem region info located after the table of contents
 * @magic:	magic number, must be SMEM_INFO_MAGIC
 * @size:	size of the smem region
 * @base_addr:	base address of the smem region
 * @reserved:	for now reserved entry
 * @num_items:	highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* "SIII" */
2505b394067SChris Lew 
/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base; only the low 32 bits are compared
 *		against @smem_global_entry.aux_base
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	phys_addr_t aux_base;
	void __iomem *virt_base;
	size_t size;
};
2624b638df4SBjorn Andersson 
/**
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @hwlock:	reference to a hwspinlock
 * @ptable: virtual base of partition table
 * @global_partition: describes for global partition when in use
 * @partitions: list of partitions of current processor/host
 * @item_count: max accepted item number
 * @socinfo:	platform device pointer
 * @num_regions: number of @regions
 * @regions:	flexible array of the memory regions defining the shared
 *		memory, sized by @num_regions
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	u32 item_count;
	struct platform_device *socinfo;
	struct smem_ptable *ptable;
	struct smem_partition global_partition;
	struct smem_partition partitions[SMEM_HOST_COUNT];

	unsigned num_regions;
	struct smem_region regions[];
};
2894b638df4SBjorn Andersson 
290e221a1daSAlex Elder static void *
29101f14154SBjorn Andersson phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
2929806884dSStephen Boyd {
2939806884dSStephen Boyd 	void *p = phdr;
2949806884dSStephen Boyd 
2959806884dSStephen Boyd 	return p + le32_to_cpu(phdr->offset_free_uncached);
2969806884dSStephen Boyd }
2979806884dSStephen Boyd 
298e221a1daSAlex Elder static struct smem_private_entry *
299e221a1daSAlex Elder phdr_to_first_cached_entry(struct smem_partition_header *phdr,
300c7c1dc35SBjorn Andersson 					size_t cacheline)
301c7c1dc35SBjorn Andersson {
302c7c1dc35SBjorn Andersson 	void *p = phdr;
30370708749SAlex Elder 	struct smem_private_entry *e;
304c7c1dc35SBjorn Andersson 
30570708749SAlex Elder 	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
306c7c1dc35SBjorn Andersson }
307c7c1dc35SBjorn Andersson 
308e221a1daSAlex Elder static void *
309e221a1daSAlex Elder phdr_to_last_cached_entry(struct smem_partition_header *phdr)
3109806884dSStephen Boyd {
3119806884dSStephen Boyd 	void *p = phdr;
3129806884dSStephen Boyd 
3139806884dSStephen Boyd 	return p + le32_to_cpu(phdr->offset_free_cached);
3149806884dSStephen Boyd }
3159806884dSStephen Boyd 
3169806884dSStephen Boyd static struct smem_private_entry *
31701f14154SBjorn Andersson phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
3189806884dSStephen Boyd {
3199806884dSStephen Boyd 	void *p = phdr;
3209806884dSStephen Boyd 
3219806884dSStephen Boyd 	return p + sizeof(*phdr);
3229806884dSStephen Boyd }
3239806884dSStephen Boyd 
3249806884dSStephen Boyd static struct smem_private_entry *
32501f14154SBjorn Andersson uncached_entry_next(struct smem_private_entry *e)
3269806884dSStephen Boyd {
3279806884dSStephen Boyd 	void *p = e;
3289806884dSStephen Boyd 
3299806884dSStephen Boyd 	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
3309806884dSStephen Boyd 	       le32_to_cpu(e->size);
3319806884dSStephen Boyd }
3329806884dSStephen Boyd 
333c7c1dc35SBjorn Andersson static struct smem_private_entry *
334c7c1dc35SBjorn Andersson cached_entry_next(struct smem_private_entry *e, size_t cacheline)
335c7c1dc35SBjorn Andersson {
336c7c1dc35SBjorn Andersson 	void *p = e;
337c7c1dc35SBjorn Andersson 
338c7c1dc35SBjorn Andersson 	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
339c7c1dc35SBjorn Andersson }
340c7c1dc35SBjorn Andersson 
34101f14154SBjorn Andersson static void *uncached_entry_to_item(struct smem_private_entry *e)
3429806884dSStephen Boyd {
3439806884dSStephen Boyd 	void *p = e;
3449806884dSStephen Boyd 
3459806884dSStephen Boyd 	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
3469806884dSStephen Boyd }
3479806884dSStephen Boyd 
348c7c1dc35SBjorn Andersson static void *cached_entry_to_item(struct smem_private_entry *e)
349c7c1dc35SBjorn Andersson {
350c7c1dc35SBjorn Andersson 	void *p = e;
351c7c1dc35SBjorn Andersson 
352c7c1dc35SBjorn Andersson 	return p - le32_to_cpu(e->size);
353c7c1dc35SBjorn Andersson }
354c7c1dc35SBjorn Andersson 
/* Pointer to the one and only smem handle; NULL until probe completes */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000
3604b638df4SBjorn Andersson 
/**
 * qcom_smem_alloc_private() - allocate an smem item in a private partition
 * @smem:	smem handle
 * @part:	partition to allocate from
 * @item:	item number to associate with the allocation
 * @size:	number of bytes to allocate for the item
 *
 * Walks the partition's uncached entry list to verify that @item does not
 * already exist, then appends a new entry at the uncached free offset.
 * The caller is expected to hold the remote spinlock.
 *
 * Return: 0 on success, -EEXIST if @item is already allocated, -ENOSPC if
 * the allocation would grow into the cached region, -EINVAL if the
 * partition appears corrupt.
 */
static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition *part,
				   unsigned item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	struct smem_partition_header *phdr;
	size_t alloc_size;
	void *cached;
	void *p_end;

	phdr = (struct smem_partition_header __force *)part->virt_base;
	p_end = (void *)phdr + part->size;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	/* Free offsets come from shared memory; bound them before walking */
	if (WARN_ON((void *)end > p_end || cached > p_end))
		return -EINVAL;

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY)
			goto bad_canary;
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	if (WARN_ON((void *)hdr > p_end))
		return -EINVAL;

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size > cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that does not take the remote spinlock still
	 * gets a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
bad_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return -EINVAL;
}
4224b638df4SBjorn Andersson 
/**
 * qcom_smem_alloc_global() - allocate an smem item in the global heap
 * @smem:	smem handle
 * @item:	item number to associate with the allocation
 * @size:	number of bytes to allocate for the item
 *
 * Reserves @size bytes (rounded up to 8-byte alignment) at the global heap's
 * free offset and publishes the item through its table-of-contents entry.
 * The caller is expected to hold the remote spinlock.
 *
 * Return: 0 on success, -EEXIST if @item is already allocated, -ENOMEM if
 * the heap has insufficient space.
 */
static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}
4554b638df4SBjorn Andersson 
/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated. The allocation goes into @host's private partition if
 * one exists, otherwise into the global partition or the legacy global heap.
 *
 * Return: 0 on success, -EPROBE_DEFER before smem is probed, -EINVAL for
 * fixed or out-of-range items, or a negative errno from the hwspinlock or
 * the underlying allocator.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_partition *part;
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
		part = &__smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, part, item, size);
	} else if (__smem->global_partition.virt_base) {
		part = &__smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, part, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_smem_alloc);
5044b638df4SBjorn Andersson 
/**
 * qcom_smem_get_global() - look up an item in the legacy global heap
 * @smem:	smem handle
 * @item:	item number to look up
 * @size:	pointer to be filled with the item's size, or NULL
 *
 * Resolves @item's table-of-contents entry against the registered memory
 * regions; an @aux_base of 0 selects the first (default) region.
 *
 * Return: pointer to the item on success, ERR_PTR(-ENXIO) if the item is
 * not allocated, ERR_PTR(-EINVAL) if the entry exceeds its region,
 * ERR_PTR(-ENOENT) if no region matches the entry's aux_base.
 */
static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *region;
	struct smem_global_entry *entry;
	u64 entry_offset;
	u32 e_size;
	u32 aux_base;
	unsigned i;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		region = &smem->regions[i];

		if ((u32)region->aux_base == aux_base || !aux_base) {
			e_size = le32_to_cpu(entry->size);
			entry_offset = le32_to_cpu(entry->offset);

			/* Entry fields come from shared memory; bound-check them */
			if (WARN_ON(e_size + entry_offset > region->size))
				return ERR_PTR(-EINVAL);

			if (size != NULL)
				*size = e_size;

			return region->virt_base + entry_offset;
		}
	}

	return ERR_PTR(-ENOENT);
}
5431a03964dSStephen Boyd 
/*
 * Look up @item in the private partition @part and return a pointer to its
 * payload, optionally reporting the payload size through @size.
 *
 * A private partition holds two entry lists: an "uncached" list growing up
 * from the partition header and a "cached" list growing down from the end.
 * Both lists live in shared memory that remote processors can write, so
 * every size and pointer derived from them is validated against the
 * partition bounds before use.
 *
 * Returns the item pointer, ERR_PTR(-ENOENT) if the item does not exist, or
 * ERR_PTR(-EINVAL) on corruption (bad canary or out-of-bounds metadata).
 */
static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition *part,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;
	struct smem_partition_header *phdr;
	void *item_ptr, *p_end;
	u32 padding_data;
	u32 e_size;

	phdr = (struct smem_partition_header __force *)part->virt_base;
	/* First byte past the partition; upper bound for all derived pointers */
	p_end = (void *)phdr + part->size;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	/* Walk the uncached list, which grows upward from the header */
	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				/* Reject remote-supplied sizes that exceed the partition */
				if (WARN_ON(e_size > part->size || padding_data > e_size))
					return ERR_PTR(-EINVAL);

				*size = e_size - padding_data;
			}

			item_ptr = uncached_entry_to_item(e);
			if (WARN_ON(item_ptr > p_end))
				return ERR_PTR(-EINVAL);

			return item_ptr;
		}

		e = uncached_entry_next(e);
	}

	/* The list itself must not have run past the partition */
	if (WARN_ON((void *)e > p_end))
		return ERR_PTR(-EINVAL);

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, part->cacheline);
	end = phdr_to_last_cached_entry(phdr);

	if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
		return ERR_PTR(-EINVAL);

	/* The cached list grows downward, hence the inverted comparison */
	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				if (WARN_ON(e_size > part->size || padding_data > e_size))
					return ERR_PTR(-EINVAL);

				*size = e_size - padding_data;
			}

			item_ptr = cached_entry_to_item(e);
			/* Cached items sit below their entry; check the lower bound */
			if (WARN_ON(item_ptr < (void *)phdr))
				return ERR_PTR(-EINVAL);

			return item_ptr;
		}

		e = cached_entry_next(e, part->cacheline);
	}

	if (WARN_ON((void *)e < (void *)phdr))
		return ERR_PTR(-EINVAL);

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
			le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return ERR_PTR(-EINVAL);
}
6334b638df4SBjorn Andersson 
6344b638df4SBjorn Andersson /**
6354b638df4SBjorn Andersson  * qcom_smem_get() - resolve ptr of size of a smem item
6364b638df4SBjorn Andersson  * @host:	the remote processor, or -1
6374b638df4SBjorn Andersson  * @item:	smem item handle
6384b638df4SBjorn Andersson  * @size:	pointer to be filled out with size of the item
6394b638df4SBjorn Andersson  *
6401a03964dSStephen Boyd  * Looks up smem item and returns pointer to it. Size of smem
6411a03964dSStephen Boyd  * item is returned in @size.
6424b638df4SBjorn Andersson  */
6431a03964dSStephen Boyd void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
6444b638df4SBjorn Andersson {
64520bb6c9dSDeepak Kumar Singh 	struct smem_partition *part;
6464b638df4SBjorn Andersson 	unsigned long flags;
6474b638df4SBjorn Andersson 	int ret;
6481a03964dSStephen Boyd 	void *ptr = ERR_PTR(-EPROBE_DEFER);
6494b638df4SBjorn Andersson 
6504b638df4SBjorn Andersson 	if (!__smem)
6511a03964dSStephen Boyd 		return ptr;
6524b638df4SBjorn Andersson 
6535b394067SChris Lew 	if (WARN_ON(item >= __smem->item_count))
6545b394067SChris Lew 		return ERR_PTR(-EINVAL);
6555b394067SChris Lew 
6564b638df4SBjorn Andersson 	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
6574b638df4SBjorn Andersson 					  HWSPINLOCK_TIMEOUT,
6584b638df4SBjorn Andersson 					  &flags);
6594b638df4SBjorn Andersson 	if (ret)
6601a03964dSStephen Boyd 		return ERR_PTR(ret);
6614b638df4SBjorn Andersson 
66220bb6c9dSDeepak Kumar Singh 	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
66320bb6c9dSDeepak Kumar Singh 		part = &__smem->partitions[host];
66420bb6c9dSDeepak Kumar Singh 		ptr = qcom_smem_get_private(__smem, part, item, size);
66520bb6c9dSDeepak Kumar Singh 	} else if (__smem->global_partition.virt_base) {
66620bb6c9dSDeepak Kumar Singh 		part = &__smem->global_partition;
66720bb6c9dSDeepak Kumar Singh 		ptr = qcom_smem_get_private(__smem, part, item, size);
668d52e4048SChris Lew 	} else {
6691a03964dSStephen Boyd 		ptr = qcom_smem_get_global(__smem, item, size);
670d52e4048SChris Lew 	}
6714b638df4SBjorn Andersson 
6724b638df4SBjorn Andersson 	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
6731a03964dSStephen Boyd 
6741a03964dSStephen Boyd 	return ptr;
6754b638df4SBjorn Andersson 
6764b638df4SBjorn Andersson }
677*10615007SRobert Marko EXPORT_SYMBOL_GPL(qcom_smem_get);
6784b638df4SBjorn Andersson 
6794b638df4SBjorn Andersson /**
6804b638df4SBjorn Andersson  * qcom_smem_get_free_space() - retrieve amount of free space in a partition
6814b638df4SBjorn Andersson  * @host:	the remote processor identifying a partition, or -1
6824b638df4SBjorn Andersson  *
6834b638df4SBjorn Andersson  * To be used by smem clients as a quick way to determine if any new
6844b638df4SBjorn Andersson  * allocations has been made.
6854b638df4SBjorn Andersson  */
6864b638df4SBjorn Andersson int qcom_smem_get_free_space(unsigned host)
6874b638df4SBjorn Andersson {
68820bb6c9dSDeepak Kumar Singh 	struct smem_partition *part;
6894b638df4SBjorn Andersson 	struct smem_partition_header *phdr;
6904b638df4SBjorn Andersson 	struct smem_header *header;
6914b638df4SBjorn Andersson 	unsigned ret;
6924b638df4SBjorn Andersson 
6934b638df4SBjorn Andersson 	if (!__smem)
6944b638df4SBjorn Andersson 		return -EPROBE_DEFER;
6954b638df4SBjorn Andersson 
69620bb6c9dSDeepak Kumar Singh 	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
69720bb6c9dSDeepak Kumar Singh 		part = &__smem->partitions[host];
69820bb6c9dSDeepak Kumar Singh 		phdr = part->virt_base;
6999806884dSStephen Boyd 		ret = le32_to_cpu(phdr->offset_free_cached) -
7009806884dSStephen Boyd 		      le32_to_cpu(phdr->offset_free_uncached);
701f1383348SDeepak Kumar Singh 
702f1383348SDeepak Kumar Singh 		if (ret > le32_to_cpu(part->size))
703f1383348SDeepak Kumar Singh 			return -EINVAL;
70420bb6c9dSDeepak Kumar Singh 	} else if (__smem->global_partition.virt_base) {
70520bb6c9dSDeepak Kumar Singh 		part = &__smem->global_partition;
70620bb6c9dSDeepak Kumar Singh 		phdr = part->virt_base;
707d52e4048SChris Lew 		ret = le32_to_cpu(phdr->offset_free_cached) -
708d52e4048SChris Lew 		      le32_to_cpu(phdr->offset_free_uncached);
709f1383348SDeepak Kumar Singh 
710f1383348SDeepak Kumar Singh 		if (ret > le32_to_cpu(part->size))
711f1383348SDeepak Kumar Singh 			return -EINVAL;
7124b638df4SBjorn Andersson 	} else {
7134b638df4SBjorn Andersson 		header = __smem->regions[0].virt_base;
7149806884dSStephen Boyd 		ret = le32_to_cpu(header->available);
715f1383348SDeepak Kumar Singh 
716f1383348SDeepak Kumar Singh 		if (ret > __smem->regions[0].size)
717f1383348SDeepak Kumar Singh 			return -EINVAL;
7184b638df4SBjorn Andersson 	}
7194b638df4SBjorn Andersson 
7204b638df4SBjorn Andersson 	return ret;
7214b638df4SBjorn Andersson }
722*10615007SRobert Marko EXPORT_SYMBOL_GPL(qcom_smem_get_free_space);
7234b638df4SBjorn Andersson 
72420bb6c9dSDeepak Kumar Singh static bool addr_in_range(void __iomem *base, size_t size, void *addr)
72520bb6c9dSDeepak Kumar Singh {
72620bb6c9dSDeepak Kumar Singh 	return base && (addr >= base && addr < base + size);
72720bb6c9dSDeepak Kumar Singh }
72820bb6c9dSDeepak Kumar Singh 
7296d361c1dSAlex Elder /**
7306d361c1dSAlex Elder  * qcom_smem_virt_to_phys() - return the physical address associated
7316d361c1dSAlex Elder  * with an smem item pointer (previously returned by qcom_smem_get()
7326d361c1dSAlex Elder  * @p:	the virtual address to convert
7336d361c1dSAlex Elder  *
7346d361c1dSAlex Elder  * Returns 0 if the pointer provided is not within any smem region.
7356d361c1dSAlex Elder  */
7366d361c1dSAlex Elder phys_addr_t qcom_smem_virt_to_phys(void *p)
7376d361c1dSAlex Elder {
73820bb6c9dSDeepak Kumar Singh 	struct smem_partition *part;
73920bb6c9dSDeepak Kumar Singh 	struct smem_region *area;
74020bb6c9dSDeepak Kumar Singh 	u64 offset;
74120bb6c9dSDeepak Kumar Singh 	u32 i;
74220bb6c9dSDeepak Kumar Singh 
74320bb6c9dSDeepak Kumar Singh 	for (i = 0; i < SMEM_HOST_COUNT; i++) {
74420bb6c9dSDeepak Kumar Singh 		part = &__smem->partitions[i];
74520bb6c9dSDeepak Kumar Singh 
74620bb6c9dSDeepak Kumar Singh 		if (addr_in_range(part->virt_base, part->size, p)) {
74720bb6c9dSDeepak Kumar Singh 			offset = p - part->virt_base;
74820bb6c9dSDeepak Kumar Singh 
74920bb6c9dSDeepak Kumar Singh 			return (phys_addr_t)part->phys_base + offset;
75020bb6c9dSDeepak Kumar Singh 		}
75120bb6c9dSDeepak Kumar Singh 	}
75220bb6c9dSDeepak Kumar Singh 
75320bb6c9dSDeepak Kumar Singh 	part = &__smem->global_partition;
75420bb6c9dSDeepak Kumar Singh 
75520bb6c9dSDeepak Kumar Singh 	if (addr_in_range(part->virt_base, part->size, p)) {
75620bb6c9dSDeepak Kumar Singh 		offset = p - part->virt_base;
75720bb6c9dSDeepak Kumar Singh 
75820bb6c9dSDeepak Kumar Singh 		return (phys_addr_t)part->phys_base + offset;
75920bb6c9dSDeepak Kumar Singh 	}
7606d361c1dSAlex Elder 
7616d361c1dSAlex Elder 	for (i = 0; i < __smem->num_regions; i++) {
76220bb6c9dSDeepak Kumar Singh 		area = &__smem->regions[i];
7636d361c1dSAlex Elder 
76420bb6c9dSDeepak Kumar Singh 		if (addr_in_range(area->virt_base, area->size, p)) {
76520bb6c9dSDeepak Kumar Singh 			offset = p - area->virt_base;
7666d361c1dSAlex Elder 
76720bb6c9dSDeepak Kumar Singh 			return (phys_addr_t)area->aux_base + offset;
7686d361c1dSAlex Elder 		}
7696d361c1dSAlex Elder 	}
7706d361c1dSAlex Elder 
7716d361c1dSAlex Elder 	return 0;
7726d361c1dSAlex Elder }
773*10615007SRobert Marko EXPORT_SYMBOL_GPL(qcom_smem_virt_to_phys);
7746d361c1dSAlex Elder 
7754b638df4SBjorn Andersson static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
7764b638df4SBjorn Andersson {
777dcc0967dSChris Lew 	struct smem_header *header;
7789806884dSStephen Boyd 	__le32 *versions;
7794b638df4SBjorn Andersson 
780dcc0967dSChris Lew 	header = smem->regions[0].virt_base;
781dcc0967dSChris Lew 	versions = header->version;
7824b638df4SBjorn Andersson 
7839806884dSStephen Boyd 	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
7844b638df4SBjorn Andersson }
7854b638df4SBjorn Andersson 
786d52e4048SChris Lew static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
7874b638df4SBjorn Andersson {
7884b638df4SBjorn Andersson 	struct smem_ptable *ptable;
789d52e4048SChris Lew 	u32 version;
7904b638df4SBjorn Andersson 
79120bb6c9dSDeepak Kumar Singh 	ptable = smem->ptable;
7929806884dSStephen Boyd 	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
793d52e4048SChris Lew 		return ERR_PTR(-ENOENT);
7944b638df4SBjorn Andersson 
7959806884dSStephen Boyd 	version = le32_to_cpu(ptable->version);
7969806884dSStephen Boyd 	if (version != 1) {
7974b638df4SBjorn Andersson 		dev_err(smem->dev,
7989806884dSStephen Boyd 			"Unsupported partition header version %d\n", version);
799d52e4048SChris Lew 		return ERR_PTR(-EINVAL);
800d52e4048SChris Lew 	}
801d52e4048SChris Lew 	return ptable;
802d52e4048SChris Lew }
803d52e4048SChris Lew 
8045b394067SChris Lew static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
8055b394067SChris Lew {
8065b394067SChris Lew 	struct smem_ptable *ptable;
8075b394067SChris Lew 	struct smem_info *info;
8085b394067SChris Lew 
8095b394067SChris Lew 	ptable = qcom_smem_get_ptable(smem);
8105b394067SChris Lew 	if (IS_ERR_OR_NULL(ptable))
8115b394067SChris Lew 		return SMEM_ITEM_COUNT;
8125b394067SChris Lew 
8135b394067SChris Lew 	info = (struct smem_info *)&ptable->entry[ptable->num_entries];
8145b394067SChris Lew 	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
8155b394067SChris Lew 		return SMEM_ITEM_COUNT;
8165b394067SChris Lew 
8175b394067SChris Lew 	return le16_to_cpu(info->num_items);
8185b394067SChris Lew }
8195b394067SChris Lew 
820ada79289SAlex Elder /*
821ada79289SAlex Elder  * Validate the partition header for a partition whose partition
822ada79289SAlex Elder  * table entry is supplied.  Returns a pointer to its header if
823ada79289SAlex Elder  * valid, or a null pointer otherwise.
824ada79289SAlex Elder  */
825ada79289SAlex Elder static struct smem_partition_header *
826ada79289SAlex Elder qcom_smem_partition_header(struct qcom_smem *smem,
8277d019344SAlex Elder 		struct smem_ptable_entry *entry, u16 host0, u16 host1)
828ada79289SAlex Elder {
829ada79289SAlex Elder 	struct smem_partition_header *header;
83020bb6c9dSDeepak Kumar Singh 	u32 phys_addr;
831190b216cSAlex Elder 	u32 size;
832ada79289SAlex Elder 
83320bb6c9dSDeepak Kumar Singh 	phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
83420bb6c9dSDeepak Kumar Singh 	header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));
83520bb6c9dSDeepak Kumar Singh 
83620bb6c9dSDeepak Kumar Singh 	if (!header)
83720bb6c9dSDeepak Kumar Singh 		return NULL;
838ada79289SAlex Elder 
839ada79289SAlex Elder 	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
8401b3df368SAndy Shevchenko 		dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
841ada79289SAlex Elder 		return NULL;
842ada79289SAlex Elder 	}
843ada79289SAlex Elder 
8447d019344SAlex Elder 	if (host0 != le16_to_cpu(header->host0)) {
8457d019344SAlex Elder 		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
8467d019344SAlex Elder 				host0, le16_to_cpu(header->host0));
8477d019344SAlex Elder 		return NULL;
8487d019344SAlex Elder 	}
8497d019344SAlex Elder 	if (host1 != le16_to_cpu(header->host1)) {
8507d019344SAlex Elder 		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
8517d019344SAlex Elder 				host1, le16_to_cpu(header->host1));
8527d019344SAlex Elder 		return NULL;
8537d019344SAlex Elder 	}
8547d019344SAlex Elder 
855190b216cSAlex Elder 	size = le32_to_cpu(header->size);
856190b216cSAlex Elder 	if (size != le32_to_cpu(entry->size)) {
857190b216cSAlex Elder 		dev_err(smem->dev, "bad partition size (%u != %u)\n",
858190b216cSAlex Elder 			size, le32_to_cpu(entry->size));
859190b216cSAlex Elder 		return NULL;
860190b216cSAlex Elder 	}
861190b216cSAlex Elder 
862380dc4afSAlex Elder 	if (le32_to_cpu(header->offset_free_uncached) > size) {
863380dc4afSAlex Elder 		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
864380dc4afSAlex Elder 			le32_to_cpu(header->offset_free_uncached), size);
865380dc4afSAlex Elder 		return NULL;
866380dc4afSAlex Elder 	}
867380dc4afSAlex Elder 
868ada79289SAlex Elder 	return header;
869ada79289SAlex Elder }
870ada79289SAlex Elder 
871d52e4048SChris Lew static int qcom_smem_set_global_partition(struct qcom_smem *smem)
872d52e4048SChris Lew {
873d52e4048SChris Lew 	struct smem_partition_header *header;
8748fa1a214SAlex Elder 	struct smem_ptable_entry *entry;
875d52e4048SChris Lew 	struct smem_ptable *ptable;
8768fa1a214SAlex Elder 	bool found = false;
877d52e4048SChris Lew 	int i;
878d52e4048SChris Lew 
87920bb6c9dSDeepak Kumar Singh 	if (smem->global_partition.virt_base) {
8800b65c59eSBjorn Andersson 		dev_err(smem->dev, "Already found the global partition\n");
8810b65c59eSBjorn Andersson 		return -EINVAL;
8820b65c59eSBjorn Andersson 	}
8830b65c59eSBjorn Andersson 
884d52e4048SChris Lew 	ptable = qcom_smem_get_ptable(smem);
885d52e4048SChris Lew 	if (IS_ERR(ptable))
886d52e4048SChris Lew 		return PTR_ERR(ptable);
887d52e4048SChris Lew 
888d52e4048SChris Lew 	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
889d52e4048SChris Lew 		entry = &ptable->entry[i];
890eba75702SAlex Elder 		if (!le32_to_cpu(entry->offset))
891eba75702SAlex Elder 			continue;
892eba75702SAlex Elder 		if (!le32_to_cpu(entry->size))
893eba75702SAlex Elder 			continue;
894eba75702SAlex Elder 
89533fdbc4eSAlex Elder 		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
89633fdbc4eSAlex Elder 			continue;
89733fdbc4eSAlex Elder 
89833fdbc4eSAlex Elder 		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
8998fa1a214SAlex Elder 			found = true;
900d52e4048SChris Lew 			break;
901d52e4048SChris Lew 		}
9028fa1a214SAlex Elder 	}
903d52e4048SChris Lew 
9048fa1a214SAlex Elder 	if (!found) {
905d52e4048SChris Lew 		dev_err(smem->dev, "Missing entry for global partition\n");
9064b638df4SBjorn Andersson 		return -EINVAL;
9074b638df4SBjorn Andersson 	}
9084b638df4SBjorn Andersson 
9097d019344SAlex Elder 	header = qcom_smem_partition_header(smem, entry,
9107d019344SAlex Elder 				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
911ada79289SAlex Elder 	if (!header)
912ada79289SAlex Elder 		return -EINVAL;
913ada79289SAlex Elder 
91420bb6c9dSDeepak Kumar Singh 	smem->global_partition.virt_base = (void __iomem *)header;
91520bb6c9dSDeepak Kumar Singh 	smem->global_partition.phys_base = smem->regions[0].aux_base +
91620bb6c9dSDeepak Kumar Singh 								le32_to_cpu(entry->offset);
91720bb6c9dSDeepak Kumar Singh 	smem->global_partition.size = le32_to_cpu(entry->size);
91820bb6c9dSDeepak Kumar Singh 	smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);
919d52e4048SChris Lew 
920d52e4048SChris Lew 	return 0;
921d52e4048SChris Lew }
922d52e4048SChris Lew 
92313a920aeSAlex Elder static int
92413a920aeSAlex Elder qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
925d52e4048SChris Lew {
926d52e4048SChris Lew 	struct smem_partition_header *header;
927d52e4048SChris Lew 	struct smem_ptable_entry *entry;
928d52e4048SChris Lew 	struct smem_ptable *ptable;
9290ec29ccfSBill Wendling 	u16 remote_host;
93013a920aeSAlex Elder 	u16 host0, host1;
931d52e4048SChris Lew 	int i;
932d52e4048SChris Lew 
933d52e4048SChris Lew 	ptable = qcom_smem_get_ptable(smem);
934d52e4048SChris Lew 	if (IS_ERR(ptable))
935d52e4048SChris Lew 		return PTR_ERR(ptable);
936d52e4048SChris Lew 
9379806884dSStephen Boyd 	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
9384b638df4SBjorn Andersson 		entry = &ptable->entry[i];
9399806884dSStephen Boyd 		if (!le32_to_cpu(entry->offset))
9404b638df4SBjorn Andersson 			continue;
9419806884dSStephen Boyd 		if (!le32_to_cpu(entry->size))
9424b638df4SBjorn Andersson 			continue;
9434b638df4SBjorn Andersson 
944eba75702SAlex Elder 		host0 = le16_to_cpu(entry->host0);
945eba75702SAlex Elder 		host1 = le16_to_cpu(entry->host1);
9469806884dSStephen Boyd 		if (host0 == local_host)
9479806884dSStephen Boyd 			remote_host = host1;
948eb68cf09SAlex Elder 		else if (host1 == local_host)
9499806884dSStephen Boyd 			remote_host = host0;
950eb68cf09SAlex Elder 		else
951eb68cf09SAlex Elder 			continue;
9524b638df4SBjorn Andersson 
9534b638df4SBjorn Andersson 		if (remote_host >= SMEM_HOST_COUNT) {
9540ec29ccfSBill Wendling 			dev_err(smem->dev, "bad host %u\n", remote_host);
9554b638df4SBjorn Andersson 			return -EINVAL;
9564b638df4SBjorn Andersson 		}
9574b638df4SBjorn Andersson 
95820bb6c9dSDeepak Kumar Singh 		if (smem->partitions[remote_host].virt_base) {
9590ec29ccfSBill Wendling 			dev_err(smem->dev, "duplicate host %u\n", remote_host);
9604b638df4SBjorn Andersson 			return -EINVAL;
9614b638df4SBjorn Andersson 		}
9624b638df4SBjorn Andersson 
9637d019344SAlex Elder 		header = qcom_smem_partition_header(smem, entry, host0, host1);
964ada79289SAlex Elder 		if (!header)
965ada79289SAlex Elder 			return -EINVAL;
966ada79289SAlex Elder 
96720bb6c9dSDeepak Kumar Singh 		smem->partitions[remote_host].virt_base = (void __iomem *)header;
96820bb6c9dSDeepak Kumar Singh 		smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
96920bb6c9dSDeepak Kumar Singh 										le32_to_cpu(entry->offset);
97020bb6c9dSDeepak Kumar Singh 		smem->partitions[remote_host].size = le32_to_cpu(entry->size);
97120bb6c9dSDeepak Kumar Singh 		smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
9724b638df4SBjorn Andersson 	}
9734b638df4SBjorn Andersson 
9744b638df4SBjorn Andersson 	return 0;
9754b638df4SBjorn Andersson }
9764b638df4SBjorn Andersson 
97720bb6c9dSDeepak Kumar Singh static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
97820bb6c9dSDeepak Kumar Singh {
97920bb6c9dSDeepak Kumar Singh 	u32 ptable_start;
98020bb6c9dSDeepak Kumar Singh 
98120bb6c9dSDeepak Kumar Singh 	/* map starting 4K for smem header */
98220bb6c9dSDeepak Kumar Singh 	region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
98320bb6c9dSDeepak Kumar Singh 	ptable_start = region->aux_base + region->size - SZ_4K;
98420bb6c9dSDeepak Kumar Singh 	/* map last 4k for toc */
98520bb6c9dSDeepak Kumar Singh 	smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);
98620bb6c9dSDeepak Kumar Singh 
98720bb6c9dSDeepak Kumar Singh 	if (!region->virt_base || !smem->ptable)
98820bb6c9dSDeepak Kumar Singh 		return -ENOMEM;
98920bb6c9dSDeepak Kumar Singh 
99020bb6c9dSDeepak Kumar Singh 	return 0;
99120bb6c9dSDeepak Kumar Singh }
99220bb6c9dSDeepak Kumar Singh 
99320bb6c9dSDeepak Kumar Singh static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
99420bb6c9dSDeepak Kumar Singh {
99520bb6c9dSDeepak Kumar Singh 	u32 phys_addr;
99620bb6c9dSDeepak Kumar Singh 
99720bb6c9dSDeepak Kumar Singh 	phys_addr = smem->regions[0].aux_base;
99820bb6c9dSDeepak Kumar Singh 
99920bb6c9dSDeepak Kumar Singh 	smem->regions[0].size = size;
100020bb6c9dSDeepak Kumar Singh 	smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);
100120bb6c9dSDeepak Kumar Singh 
100220bb6c9dSDeepak Kumar Singh 	if (!smem->regions[0].virt_base)
100320bb6c9dSDeepak Kumar Singh 		return -ENOMEM;
100420bb6c9dSDeepak Kumar Singh 
100520bb6c9dSDeepak Kumar Singh 	return 0;
100620bb6c9dSDeepak Kumar Singh }
100720bb6c9dSDeepak Kumar Singh 
1008b5af64fcSBjorn Andersson static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
1009b5af64fcSBjorn Andersson 				 struct smem_region *region)
10104b638df4SBjorn Andersson {
1011b5af64fcSBjorn Andersson 	struct device *dev = smem->dev;
1012d0bfd7c9SStephen Boyd 	struct device_node *np;
1013d0bfd7c9SStephen Boyd 	struct resource r;
1014d0bfd7c9SStephen Boyd 	int ret;
10154b638df4SBjorn Andersson 
1016d0bfd7c9SStephen Boyd 	np = of_parse_phandle(dev->of_node, name, 0);
1017d0bfd7c9SStephen Boyd 	if (!np) {
1018d0bfd7c9SStephen Boyd 		dev_err(dev, "No %s specified\n", name);
1019d0bfd7c9SStephen Boyd 		return -EINVAL;
10204b638df4SBjorn Andersson 	}
10214b638df4SBjorn Andersson 
1022d0bfd7c9SStephen Boyd 	ret = of_address_to_resource(np, 0, &r);
1023d0bfd7c9SStephen Boyd 	of_node_put(np);
1024d0bfd7c9SStephen Boyd 	if (ret)
1025d0bfd7c9SStephen Boyd 		return ret;
1026d0bfd7c9SStephen Boyd 
1027b5af64fcSBjorn Andersson 	region->aux_base = r.start;
1028b5af64fcSBjorn Andersson 	region->size = resource_size(&r);
1029d0bfd7c9SStephen Boyd 
1030d0bfd7c9SStephen Boyd 	return 0;
10314b638df4SBjorn Andersson }
10324b638df4SBjorn Andersson 
/*
 * Probe: resolve and map the SMEM regions, validate the SBL-initialized
 * header, take the shared hwspinlock while sampling the heap size, set up
 * the partition tables for the detected SMEM version and finally publish
 * the global __smem handle and register the socinfo child device.
 */
static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct reserved_mem *rmem;
	struct qcom_smem *smem;
	unsigned long flags;
	size_t array_size;
	int num_regions;
	int hwlock_id;
	u32 version;
	u32 size;
	int ret;
	int i;

	/* One mandatory main region, plus an optional RPM message RAM region */
	num_regions = 1;
	if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram"))
		num_regions++;

	/* smem ends in a flexible array of per-region descriptors */
	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	rmem = of_reserved_mem_lookup(pdev->dev.of_node);
	if (rmem) {
		smem->regions[0].aux_base = rmem->base;
		smem->regions[0].size = rmem->size;
	} else {
		/*
		 * Fall back to the memory-region reference, if we're not a
		 * reserved-memory node.
		 */
		ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
		if (ret)
			return ret;
	}

	if (num_regions > 1) {
		ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
		if (ret)
			return ret;
	}


	/* Map the main region's header (first 4K) and partition table (last 4K) */
	ret = qcom_smem_map_toc(smem, &smem->regions[0]);
	if (ret)
		return ret;

	/* Auxiliary regions are mapped in full */
	for (i = 1; i < num_regions; i++) {
		smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
							     smem->regions[i].aux_base,
							     smem->regions[i].size);
		if (!smem->regions[i].virt_base) {
			dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
			return -ENOMEM;
		}
	}

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		if (hwlock_id != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	/*
	 * Sample the heap extent (available + free_offset) under the hwlock so
	 * concurrent allocators on other processors cannot skew the two reads.
	 */
	ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
	if (ret)
		return ret;
	size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
	hwspin_unlock_irqrestore(smem->hwlock, &flags);

	version = qcom_smem_get_sbl_version(smem);
	/*
	 * smem header mapping is required only in heap version scheme, so unmap
	 * it here. It will be remapped in qcom_smem_map_global() when whole
	 * partition is mapped again.
	 */
	devm_iounmap(smem->dev, smem->regions[0].virt_base);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		/* NOTE(review): mapping failure here is ignored — confirm intended */
		qcom_smem_map_global(smem, size);
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	/* Publish the instance; qcom_smem_get() and friends are now usable */
	__smem = smem;

	smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
						      PLATFORM_DEVID_NONE, NULL,
						      0);
	if (IS_ERR(smem->socinfo))
		dev_dbg(&pdev->dev, "failed to register socinfo device\n");

	return 0;
}
11564b638df4SBjorn Andersson 
/* Tear down in reverse probe order; devm handles the region mappings. */
static int qcom_smem_remove(struct platform_device *pdev)
{
	/* Remove the socinfo child before smem state disappears */
	platform_device_unregister(__smem->socinfo);

	hwspin_lock_free(__smem->hwlock);
	/* Clear the handle so later qcom_smem_get() callers see -EPROBE_DEFER */
	__smem = NULL;

	return 0;
}
11664b638df4SBjorn Andersson 
/* Device-tree match: binds against the "qcom,smem" shared-memory node. */
static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		/* State is a process-wide singleton (__smem); forbid manual unbind */
		.suppress_bind_attrs = true,
	},
};
11824b638df4SBjorn Andersson 
/* Registered at arch_initcall time so smem is up before its consumers probe. */
static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit)
11944b638df4SBjorn Andersson 
11954b638df4SBjorn Andersson MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
11964b638df4SBjorn Andersson MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
11974b638df4SBjorn Andersson MODULE_LICENSE("GPL v2");
1198