/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), which is found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets. The header of
 * cached items comes after the data.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
 * for the global heap. A new global partition is created from the global heap
 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
 * set by the bootloader.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on
 * all platforms.
 */
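
/*
 * Illustration only, not part of the original driver: a rough sketch of how
 * the private partition layout described above translates into pointer
 * arithmetic, assuming a partition header "phdr" and a private entry "e":
 *
 *	uncached item data:  (void *)e + sizeof(*e) + le16_to_cpu(e->padding_hdr)
 *	next uncached entry: uncached item data + le32_to_cpu(e->size)
 *	cached item data:    (void *)e - le32_to_cpu(e->size)
 *	next cached entry:   (void *)e - le32_to_cpu(e->size) -
 *			     ALIGN(sizeof(*e), cacheline)
 *
 * The phdr_to_*_entry(), *_entry_next() and *_entry_to_item() helpers below
 * implement this arithmetic.
 */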

/*
 * The version member of the smem header contains an array of versions for the
 * various software components in the SoC. We verify that the boot loader
 * version is valid as a sanity check.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_GLOBAL_HEAP_VERSION	11
#define SMEM_GLOBAL_PART_VERSION	12

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST	0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		10

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region; bits 0 and 1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc

/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @cacheline:	alignment for "cached" entries
 * @reserved:	reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	for now reserved entries
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	for now reserved entry
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5

/**
 * struct smem_info - smem region info located after the table of contents
 * @magic:	magic number, must be SMEM_INFO_MAGIC
 * @size:	size of the smem region
 * @base_addr:	base address of the smem region
 * @reserved:	for now reserved entry
 * @num_items:	highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* "SIII" */

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	u32 aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @hwlock:	reference to a hwspinlock
 * @global_partition:	pointer to global partition when in use
 * @global_cacheline:	cacheline size for global partition
 * @partitions:	list of pointers to partitions affecting the current
 *		processor/host
 * @cacheline:	list of cacheline sizes for each host
 * @item_count: max accepted item number
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	struct smem_partition_header *global_partition;
	size_t global_cacheline;
	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
	size_t cacheline[SMEM_HOST_COUNT];
	u32 item_count;

	unsigned num_regions;
	struct smem_region regions[0];
};

static void *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static struct smem_private_entry *
phdr_to_first_cached_entry(struct smem_partition_header *phdr,
					size_t cacheline)
{
	void *p = phdr;
	struct smem_private_entry *e;

	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
}

static void *
phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);
}

static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   unsigned item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	size_t alloc_size;
	void *cached;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY)
			goto bad_canary;
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size > cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
bad_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return -EINVAL;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 *
 * Return: 0 on success, a negative errno on failure.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL(qcom_smem_alloc);
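
/*
 * Illustration only, not part of the driver: a minimal sketch of how an smem
 * client might allocate an item shared with a remote processor. The item
 * number and host id below are made up for the example.
 *
 *	#include <linux/soc/qcom/smem.h>
 *
 *	#define MY_SMEM_ITEM	137	// hypothetical item number
 *	#define MY_REMOTE_HOST	1	// hypothetical remote processor id
 *
 *	ret = qcom_smem_alloc(MY_REMOTE_HOST, MY_SMEM_ITEM, 64);
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;	// includes -EPROBE_DEFER if smem isn't up yet
 */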

static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *area;
	struct smem_global_entry *entry;
	u32 aux_base;
	unsigned i;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		area = &smem->regions[i];

		if (area->aux_base == aux_base || !aux_base) {
			if (size != NULL)
				*size = le32_to_cpu(entry->size);
			return area->virt_base + le32_to_cpu(entry->offset);
		}
	}

	return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   size_t cacheline,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return uncached_entry_to_item(e);
		}

		e = uncached_entry_next(e);
	}

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, cacheline);
	end = phdr_to_last_cached_entry(phdr);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return cached_entry_to_item(e);
		}

		e = cached_entry_next(e, cacheline);
	}

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
			le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return ERR_PTR(-EINVAL);
}

/**
 * qcom_smem_get() - resolve pointer to and size of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @size:	pointer to be filled out with size of the item
 *
 * Looks up smem item and returns pointer to it. Size of smem
 * item is returned in @size.
 *
 * Return: a pointer to the item on success, an ERR_PTR() on failure.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	size_t cacheln;
	int ret;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	if (WARN_ON(item >= __smem->item_count))
		return ERR_PTR(-EINVAL);

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ERR_PTR(ret);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		cacheln = __smem->cacheline[host];
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		cacheln = __smem->global_cacheline;
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ptr;
}
EXPORT_SYMBOL(qcom_smem_get);
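
/*
 * Illustration only, not part of the driver: a minimal sketch of looking an
 * item back up once it has been allocated. MY_SMEM_ITEM and MY_REMOTE_HOST
 * are the hypothetical values from the allocation example above.
 *
 *	size_t len;
 *	void *data;
 *
 *	data = qcom_smem_get(MY_REMOTE_HOST, MY_SMEM_ITEM, &len);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	// 'data' points directly into shared memory; 'len' is the usable
 *	// size of the item, excluding any allocation padding.
 */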

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:	the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 *
 * Return: number of bytes free in the partition (or in the global heap when
 * no partition applies), or -EPROBE_DEFER if smem is not yet available.
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);
	}

	return ret;
}
EXPORT_SYMBOL(qcom_smem_get_free_space);
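
/*
 * Illustration only, not part of the driver: a client can poll the free space
 * of its partition to notice that the remote side has made new allocations,
 * e.g. (hypothetical 'last_seen_free' bookkeeping):
 *
 *	int free = qcom_smem_get_free_space(MY_REMOTE_HOST);
 *	if (free >= 0 && free < last_seen_free) {
 *		// something was allocated since the last check
 *		last_seen_free = free;
 *	}
 */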

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	struct smem_header *header;
	__le32 *versions;

	header = smem->regions[0].virt_base;
	versions = header->version;

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	u32 version;

	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %d\n", version);
		return ERR_PTR(-EINVAL);
	}
	return ptable;
}

static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	struct smem_info *info;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR_OR_NULL(ptable))
		return SMEM_ITEM_COUNT;

	info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
		return SMEM_ITEM_COUNT;

	return le16_to_cpu(info->num_items);
}

static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	u32 host0, host1, size;
	bool found = false;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);

		if (host0 == SMEM_GLOBAL_HOST && host0 == host1) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(smem->dev, "Missing entry for global partition\n");
		return -EINVAL;
	}

	if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "Invalid entry for global partition\n");
		return -EINVAL;
	}

	if (smem->global_partition) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}

	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
	host0 = le16_to_cpu(header->host0);
	host1 = le16_to_cpu(header->host1);

	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "Global partition has invalid magic\n");
		return -EINVAL;
	}

	if (host0 != SMEM_GLOBAL_HOST && host1 != SMEM_GLOBAL_HOST) {
		dev_err(smem->dev, "Global partition hosts are invalid\n");
		return -EINVAL;
	}

	if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "Global partition has invalid size\n");
		return -EINVAL;
	}

	size = le32_to_cpu(header->offset_free_uncached);
	if (size > le32_to_cpu(header->size)) {
		dev_err(smem->dev,
			"Global partition has invalid free pointer\n");
		return -EINVAL;
	}

	smem->global_partition = header;
	smem->global_cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}

static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
					  unsigned int local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned int remote_host;
	u32 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);

		if (host0 != local_host && host1 != local_host)
			continue;

		if (!le32_to_cpu(entry->offset))
			continue;

		if (!le32_to_cpu(entry->size))
			continue;

		if (host0 == local_host)
			remote_host = host1;
		else
			remote_host = host0;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev,
				"Invalid remote host %d\n",
				remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host]) {
			dev_err(smem->dev,
				"Already found a partition for host %d\n",
				remote_host);
			return -EINVAL;
		}

		header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
		host0 = le16_to_cpu(header->host0);
		host1 = le16_to_cpu(header->host1);

		if (memcmp(header->magic, SMEM_PART_MAGIC,
			    sizeof(header->magic))) {
			dev_err(smem->dev,
				"Partition %d has invalid magic\n", i);
			return -EINVAL;
		}

		if (host0 != local_host && host1 != local_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (host0 != remote_host && host1 != remote_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
			dev_err(smem->dev,
				"Partition %d has invalid size\n", i);
			return -EINVAL;
		}

		if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
			dev_err(smem->dev,
				"Partition %d has invalid free pointer\n", i);
			return -EINVAL;
		}

		smem->partitions[remote_host] = header;
		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
	}

	return 0;
}

static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
				const char *name, int i)
{
	struct device_node *np;
	struct resource r;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;

	smem->regions[i].aux_base = (u32)r.start;
	smem->regions[i].size = resource_size(&r);
	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, resource_size(&r));
	if (!smem->regions[i].virt_base)
		return -ENOMEM;

	return 0;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct qcom_smem *smem;
	size_t array_size;
	int num_regions;
	int hwlock_id;
	u32 version;
	int ret;

	num_regions = 1;
	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
		num_regions++;

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
	if (ret)
		return ret;

	if (num_regions > 1) {
		ret = qcom_smem_map_memory(smem, &pdev->dev,
					   "qcom,rpm-msg-ram", 1);
		if (ret)
			return ret;
	}

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	version = qcom_smem_get_sbl_version(smem);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		if (hwlock_id != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	__smem = smem;

	return 0;
}

static int qcom_smem_remove(struct platform_device *pdev)
{
	hwspin_lock_free(__smem->hwlock);
	__smem = NULL;

	return 0;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit)

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");