xref: /openbmc/linux/drivers/soc/qcom/smem.c (revision a216000f0140f415cec96129f777b5234c9d142f)
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), that is found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets. The header of
 * cached items comes after the data.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
 * platforms.
 */
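
/*
 * A minimal usage sketch of the public API below; remote_host, MY_SMEM_ITEM
 * and struct my_payload are hypothetical:
 *
 *	size_t size;
 *	struct my_payload *data;
 *	int ret;
 *
 *	ret = qcom_smem_alloc(remote_host, MY_SMEM_ITEM, sizeof(*data));
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 *
 *	data = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 */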

/*
 * Item 3 of the global heap contains an array of versions for the various
 * software components in the SoC. We verify that the boot loader version
 * matches the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
 */
#define SMEM_ITEM_VERSION	3
#define  SMEM_MASTER_SBL_VERSION_INDEX	7
#define  SMEM_EXPECTED_VERSION		11

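/*
 * The upper 16 bits of the SBL version word hold the number that
 * qcom_smem_probe() compares against SMEM_EXPECTED_VERSION.
 */
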
/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		9

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc

/**
 * struct smem_header - header found at the beginning of the primary smem
 *			region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @cacheline:	alignment for "cached" entries
 * @reserved:	reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	reserved for future use
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	reserved for future use
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	reserved for future use
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	u32 aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @hwlock:	reference to a hwspinlock
 * @partitions:	list of pointers to partitions affecting the current
 *		processor/host
 * @cacheline:	list of cacheline sizes for each host
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
	size_t cacheline[SMEM_HOST_COUNT];

	unsigned num_regions;
	struct smem_region regions[];
};
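
/*
 * In the uncached region each item is laid out as its smem_private_entry
 * header, optional header padding, the data and optional data padding, with
 * new items appended towards offset_free_uncached. In the cached region the
 * (cacheline aligned) header follows the data and items grow downwards from
 * the end of the partition towards offset_free_cached. The accessors below
 * encode this pointer arithmetic.
 */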

static struct smem_private_entry *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr,
					size_t cacheline)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*phdr), cacheline);
}

static void *phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);
}

static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   unsigned host,
				   unsigned item,
				   size_t size)
{
	struct smem_partition_header *phdr;
	struct smem_private_entry *hdr, *end;
	size_t alloc_size;
	void *cached;

	phdr = smem->partitions[host];
	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY) {
			dev_err(smem->dev,
				"Found invalid canary in host %d partition\n",
				host);
			return -EINVAL;
		}

		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size >= cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_header *header;
	struct smem_global_entry *entry;

	if (WARN_ON(item >= SMEM_ITEM_COUNT))
		return -EINVAL;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
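 *
 * Return: 0 on success, negative errno on failure.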
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
		ret = qcom_smem_alloc_private(__smem, host, item, size);
	else
		ret = qcom_smem_alloc_global(__smem, item, size);

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL(qcom_smem_alloc);

static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *area;
	struct smem_global_entry *entry;
	u32 aux_base;
	unsigned i;

	if (WARN_ON(item >= SMEM_ITEM_COUNT))
		return ERR_PTR(-EINVAL);

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		area = &smem->regions[i];

		if (area->aux_base == aux_base || !aux_base) {
			if (size != NULL)
				*size = le32_to_cpu(entry->size);
			return area->virt_base + le32_to_cpu(entry->offset);
		}
	}

	return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   unsigned host,
				   unsigned item,
				   size_t *size)
{
	struct smem_partition_header *phdr;
	struct smem_private_entry *e, *end;
	size_t cacheline;

	phdr = smem->partitions[host];
	cacheline = smem->cacheline[host];

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return uncached_entry_to_item(e);
		}

		e = uncached_entry_next(e);
	}

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, cacheline);
	end = phdr_to_last_cached_entry(phdr);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return cached_entry_to_item(e);
		}

		e = cached_entry_next(e, cacheline);
	}

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in host %d partition\n", host);

	return ERR_PTR(-EINVAL);
}

/**
 * qcom_smem_get() - resolve ptr and size of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @size:	pointer to be filled out with size of the item
 *
 * Looks up the smem item and returns a pointer to it. The size of the smem
 * item is returned in @size.
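 *
 * Return: pointer to the smem item on success, ERR_PTR() encoded error
 * otherwise.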
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	unsigned long flags;
	int ret;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ERR_PTR(ret);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
		ptr = qcom_smem_get_private(__smem, host, item, size);
	else
		ptr = qcom_smem_get_global(__smem, item, size);

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ptr;
}
EXPORT_SYMBOL(qcom_smem_get);

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:	the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
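 *
 * Return: number of bytes available for allocation, or -EPROBE_DEFER if smem
 * has not yet been probed.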
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);
	}

	return ret;
}
EXPORT_SYMBOL(qcom_smem_get_free_space);

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	__le32 *versions;
	size_t size;

	versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
	if (IS_ERR(versions)) {
		dev_err(smem->dev, "Unable to read the version item\n");
		return -ENOENT;
	}

	/* The item must be large enough to contain the SBL version word */
	if (size < sizeof(versions[0]) * (SMEM_MASTER_SBL_VERSION_INDEX + 1)) {
		dev_err(smem->dev, "Version item is too small\n");
		return -EINVAL;
	}

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

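/*
 * Walk the partition table found at the end of the main smem region, validate
 * each partition that involves @local_host against its table entry and record
 * the partition header and cacheline size per remote host.
 */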
static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
					  unsigned local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned remote_host;
	u32 version, host0, host1;
	int i;

	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return 0;

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %d\n", version);
		return -EINVAL;
	}

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);

		if (host0 != local_host && host1 != local_host)
			continue;

		if (!le32_to_cpu(entry->offset))
			continue;

		if (!le32_to_cpu(entry->size))
			continue;

		if (host0 == local_host)
			remote_host = host1;
		else
			remote_host = host0;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev,
				"Invalid remote host %d\n",
				remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host]) {
			dev_err(smem->dev,
				"Already found a partition for host %d\n",
				remote_host);
			return -EINVAL;
		}

		header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
		host0 = le16_to_cpu(header->host0);
		host1 = le16_to_cpu(header->host1);

		if (memcmp(header->magic, SMEM_PART_MAGIC,
			    sizeof(header->magic))) {
			dev_err(smem->dev,
				"Partition %d has invalid magic\n", i);
			return -EINVAL;
		}

		if (host0 != local_host && host1 != local_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (host0 != remote_host && host1 != remote_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
			dev_err(smem->dev,
				"Partition %d has invalid size\n", i);
			return -EINVAL;
		}

		if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
			dev_err(smem->dev,
				"Partition %d has invalid free pointer\n", i);
			return -EINVAL;
		}

		smem->partitions[remote_host] = header;
		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
	}

	return 0;
}

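/*
 * Resolve the memory region referenced by the @name phandle in the smem
 * device tree node and map it write-combined as region @i.
 */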
static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
				const char *name, int i)
{
	struct device_node *np;
	struct resource r;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;

	smem->regions[i].aux_base = (u32)r.start;
	smem->regions[i].size = resource_size(&r);
	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, resource_size(&r));
	if (!smem->regions[i].virt_base)
		return -ENOMEM;

	return 0;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct qcom_smem *smem;
	size_t array_size;
	int num_regions;
	int hwlock_id;
	u32 version;
	int ret;

	num_regions = 1;
	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
		num_regions++;

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
	if (ret)
		return ret;

	if (num_regions > 1) {
		ret = qcom_smem_map_memory(smem, &pdev->dev,
					   "qcom,rpm-msg-ram", 1);
		if (ret)
			return ret;
	}

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	version = qcom_smem_get_sbl_version(smem);
	if (version >> 16 != SMEM_EXPECTED_VERSION) {
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0)
		return ret;

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		if (hwlock_id != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	__smem = smem;

	return 0;
}

static int qcom_smem_remove(struct platform_device *pdev)
{
	hwspin_lock_free(__smem->hwlock);
	__smem = NULL;

	return 0;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");