// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>

/*
 * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
 * of a single 32-bit value between two processors.  Each value has a single
 * writer (the local side) and a single reader (the remote side). Values are
 * uniquely identified in the system by the directed edge (local processor ID
 * to remote processor ID) and a string identifier.
 *
 * Each processor is responsible for creating the outgoing SMEM items and each
 * item is writable by the local processor and readable by the remote
 * processor.  By using two separate SMEM items that are single-reader and
 * single-writer, SMP2P does not require any remote locking mechanisms.
 *
 * The driver uses the Linux interrupt framework to expose a virtual
 * interrupt controller for each inbound entry and the qcom_smem_state
 * framework to expose a state handle for each outbound entry.
 */
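
/*
 * Illustrative sketch (not part of this driver): a typical client resolves
 * an outbound entry through the smem-state API and receives inbound entry
 * changes as ordinary interrupts.  Entry names such as "stop" and "stop-ack"
 * below are hypothetical and come from the client's own devicetree node:
 *
 *	struct qcom_smem_state *stop_state;
 *	unsigned int stop_bit;
 *	int ack_irq, ret;
 *
 *	stop_state = qcom_smem_state_get(dev, "stop", &stop_bit);
 *	ret = qcom_smem_state_update_bits(stop_state, BIT(stop_bit),
 *					  BIT(stop_bit));
 *
 *	ack_irq = of_irq_get_byname(dev->of_node, "stop-ack");
 *	ret = devm_request_threaded_irq(dev, ack_irq, NULL, stop_ack_handler,
 *					IRQF_ONESHOT, "smp2p-stop-ack", data);
 */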

#define SMP2P_MAX_ENTRY 16
#define SMP2P_MAX_ENTRY_NAME 16

#define SMP2P_FEATURE_SSR_ACK 0x1
#define SMP2P_FLAGS_RESTART_DONE_BIT 0
#define SMP2P_FLAGS_RESTART_ACK_BIT 1

#define SMP2P_MAGIC 0x504d5324
#define SMP2P_ALL_FEATURES	SMP2P_FEATURE_SSR_ACK

/**
 * struct smp2p_smem_item - in memory communication structure
 * @magic:		magic number
 * @version:		version - must be 1
 * @features:		feature flags, negotiated with the remote side -
 *			currently only SMP2P_FEATURE_SSR_ACK
 * @local_pid:		processor id of sending end
 * @remote_pid:		processor id of receiving end
 * @total_entries:	number of entries - always SMP2P_MAX_ENTRY
 * @valid_entries:	number of allocated entries
 * @flags:		restart done/ack flags, used for the SSR handshake
 * @entries:		individual communication entries
 *     @name:		name of the entry
 *     @value:		content of the entry
 */
struct smp2p_smem_item {
	u32 magic;
	u8 version;
	unsigned features:24;
	u16 local_pid;
	u16 remote_pid;
	u16 total_entries;
	u16 valid_entries;
	u32 flags;

	struct {
		u8 name[SMP2P_MAX_ENTRY_NAME];
		u32 value;
	} entries[SMP2P_MAX_ENTRY];
} __packed;

/**
 * struct smp2p_entry - driver context matching one entry
 * @node:	list entry to keep track of allocated entries
 * @smp2p:	reference to the device driver context
 * @name:	name of the entry, to match against smp2p_smem_item
 * @value:	pointer to smp2p_smem_item entry value
 * @last_value:	last handled value
 * @domain:	irq_domain for inbound entries
 * @irq_enabled: bitmap to track enabled irq bits
 * @irq_rising:	bitmap to mark irq bits for rising detection
 * @irq_falling: bitmap to mark irq bits for falling detection
 * @state:	smem state handle
 * @lock:	spinlock to protect read-modify-write of the value
 */
struct smp2p_entry {
	struct list_head node;
	struct qcom_smp2p *smp2p;

	const char *name;
	u32 *value;
	u32 last_value;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);

	struct qcom_smem_state *state;

	spinlock_t lock;
};

#define SMP2P_INBOUND	0
#define SMP2P_OUTBOUND	1

/**
 * struct qcom_smp2p - device driver context
 * @dev:	device driver handle
 * @in:		pointer to the inbound smem item
 * @out:	pointer to the outbound smem item
 * @smem_items:	ids of the two smem items
 * @valid_entries: already scanned inbound entries
 * @ssr_ack_enabled: SMP2P_FEATURE_SSR_ACK feature is supported and was enabled
 * @ssr_ack: current cached state of the local ack bit
 * @negotiation_done: whether negotiation has finished
 * @local_pid:	processor id of the inbound edge
 * @remote_pid:	processor id of the outbound edge
 * @ipc_regmap:	regmap for the outbound ipc
 * @ipc_offset:	offset within the regmap
 * @ipc_bit:	bit to set at @ipc_offset to signal the remote processor
 * @mbox_client: mailbox client handle
 * @mbox_chan:	apcs ipc mailbox channel handle
 * @inbound:	list of inbound entries
 * @outbound:	list of outbound entries
 */
struct qcom_smp2p {
	struct device *dev;

	struct smp2p_smem_item *in;
	struct smp2p_smem_item *out;

	unsigned smem_items[SMP2P_OUTBOUND + 1];

	unsigned valid_entries;

	bool ssr_ack_enabled;
	bool ssr_ack;
	bool negotiation_done;

	unsigned local_pid;
	unsigned remote_pid;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct list_head inbound;
	struct list_head outbound;
};

static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
{
	/* Make sure any updated data is written before the kick */
	wmb();

	if (smp2p->mbox_chan) {
		mbox_send_message(smp2p->mbox_chan, NULL);
		mbox_client_txdone(smp2p->mbox_chan, 0);
	} else {
		regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
	}
}

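/*
 * SSR (subsystem restart) handshake: each time the remote processor comes
 * back from a restart it toggles the RESTART_DONE bit in its outbound flags.
 * The local side acknowledges by updating RESTART_ACK in its own outbound
 * flags to match, so a pending acknowledgment is detected by comparing the
 * remote bit against the locally cached ssr_ack value rather than against a
 * fixed level.
 */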
static bool qcom_smp2p_check_ssr(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *in = smp2p->in;
	bool restart;

	if (!smp2p->ssr_ack_enabled)
		return false;

	restart = in->flags & BIT(SMP2P_FLAGS_RESTART_DONE_BIT);

	return restart != smp2p->ssr_ack;
}

static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out = smp2p->out;
	u32 val;

	smp2p->ssr_ack = !smp2p->ssr_ack;

	val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
	if (smp2p->ssr_ack)
		val |= BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
	out->flags = val;

	qcom_smp2p_kick(smp2p);
}

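/*
 * Version and feature negotiation: once the remote side publishes a header
 * with a matching version, the advertised feature sets are intersected and
 * the resulting common features (currently only SMP2P_FEATURE_SSR_ACK) are
 * recorded in the outbound item.
 */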
static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out = smp2p->out;
	struct smp2p_smem_item *in = smp2p->in;

	if (in->version == out->version) {
		out->features &= in->features;

		if (out->features & SMP2P_FEATURE_SSR_ACK)
			smp2p->ssr_ack_enabled = true;

		smp2p->negotiation_done = true;
	}
}

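/*
 * Scan the inbound item for entries that have appeared since the last scan
 * and bind them to their local smp2p_entry by name, then compare each bound
 * entry's value against the last observed value and deliver a nested
 * interrupt for every enabled bit whose change matches the configured
 * rising/falling edge type.
 */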
static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *in;
	struct smp2p_entry *entry;
	int irq_pin;
	u32 status;
	char buf[SMP2P_MAX_ENTRY_NAME];
	u32 val;
	int i;

	in = smp2p->in;

	/* Match newly created entries */
	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
		list_for_each_entry(entry, &smp2p->inbound, node) {
			memcpy(buf, in->entries[i].name, sizeof(buf));
			if (!strcmp(buf, entry->name)) {
				entry->value = &in->entries[i].value;
				break;
			}
		}
	}
	smp2p->valid_entries = i;

	/* Fire interrupts based on any value changes */
	list_for_each_entry(entry, &smp2p->inbound, node) {
		/* Ignore entries not yet allocated by the remote side */
		if (!entry->value)
			continue;

		val = readl(entry->value);

		status = val ^ entry->last_value;
		entry->last_value = val;

		/* No changes of this entry? */
		if (!status)
			continue;

		for_each_set_bit(i, entry->irq_enabled, 32) {
			if (!(status & BIT(i)))
				continue;

			if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
			    (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}
}

/**
 * qcom_smp2p_intr() - interrupt handler for incoming notifications
 * @irq:	unused
 * @data:	smp2p driver context
 *
 * Handle notifications from the remote side, picking up newly allocated
 * entries and any changes to the state bits of existing entries.
 *
 * Return: always IRQ_HANDLED
 */
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
	struct smp2p_smem_item *in;
	struct qcom_smp2p *smp2p = data;
	unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND];
	unsigned int pid = smp2p->remote_pid;
	bool ack_restart;
	size_t size;

	in = smp2p->in;

	/* Acquire smem item, if not already found */
	if (!in) {
		in = qcom_smem_get(pid, smem_id, &size);
		if (IS_ERR(in)) {
			dev_err(smp2p->dev,
				"Unable to acquire remote smp2p item\n");
			goto out;
		}

		smp2p->in = in;
	}

	if (!smp2p->negotiation_done)
		qcom_smp2p_negotiate(smp2p);

	if (smp2p->negotiation_done) {
		ack_restart = qcom_smp2p_check_ssr(smp2p);
		qcom_smp2p_notify_in(smp2p);

		if (ack_restart)
			qcom_smp2p_do_ssr_ack(smp2p);
	}

out:
	return IRQ_HANDLED;
}

static void smp2p_mask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	clear_bit(irq, entry->irq_enabled);
}

static void smp2p_unmask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	set_bit(irq, entry->irq_enabled);
}

static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static struct irq_chip smp2p_irq_chip = {
	.name           = "smp2p",
	.irq_mask       = smp2p_mask_irq,
	.irq_unmask     = smp2p_unmask_irq,
	.irq_set_type	= smp2p_set_irq_type,
};

static int smp2p_irq_map(struct irq_domain *d,
			 unsigned int irq,
			 irq_hw_number_t hw)
{
	struct smp2p_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);
	irq_set_noprobe(irq);

	return 0;
}

static const struct irq_domain_ops smp2p_irq_ops = {
	.map = smp2p_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
				    struct smp2p_entry *entry,
				    struct device_node *node)
{
	entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smp2p->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}

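/*
 * Called through the smem-state framework: a client's
 * qcom_smem_state_update_bits() call on the handle registered in
 * qcom_smp2p_outbound_entry() ends up here, doing a locked read-modify-write
 * of the outbound entry value and kicking the remote processor whenever the
 * value actually changed.
 */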
static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
	struct smp2p_entry *entry = data;
	unsigned long flags;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&entry->lock, flags);
	val = orig = readl(entry->value);
	val &= ~mask;
	val |= value;
	writel(val, entry->value);
	spin_unlock_irqrestore(&entry->lock, flags);

	if (val != orig)
		qcom_smp2p_kick(entry->smp2p);

	return 0;
}

static const struct qcom_smem_state_ops smp2p_state_ops = {
	.update_bits = smp2p_update_bits,
};

static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
				     struct smp2p_entry *entry,
				     struct device_node *node)
{
	struct smp2p_smem_item *out = smp2p->out;
	char buf[SMP2P_MAX_ENTRY_NAME] = {};

	/* Allocate an entry from the smem item */
	strscpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);

	/* Make the logical entry reference the physical value */
	entry->value = &out->entries[out->valid_entries].value;

	out->valid_entries++;

	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
	if (IS_ERR(entry->state)) {
		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
		return PTR_ERR(entry->state);
	}

	return 0;
}

static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out;
	unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
	unsigned pid = smp2p->remote_pid;
	int ret;

	ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
	if (ret < 0 && ret != -EEXIST) {
		if (ret != -EPROBE_DEFER)
			dev_err(smp2p->dev,
				"unable to allocate local smp2p item\n");
		return ret;
	}

	out = qcom_smem_get(pid, smem_id, NULL);
	if (IS_ERR(out)) {
		dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
		return PTR_ERR(out);
	}

	memset(out, 0, sizeof(*out));
	out->magic = SMP2P_MAGIC;
	out->local_pid = smp2p->local_pid;
	out->remote_pid = smp2p->remote_pid;
	out->total_entries = SMP2P_MAX_ENTRY;
	out->valid_entries = 0;
	out->features = SMP2P_ALL_FEATURES;

	/*
	 * Make sure the rest of the header is written before we validate the
	 * item by writing a valid version number.
	 */
	wmb();
	out->version = 1;

	qcom_smp2p_kick(smp2p);

	smp2p->out = out;

	return 0;
}

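/*
 * Legacy description of the outgoing doorbell, used when no mailbox channel
 * is available: "qcom,ipc" is expected to carry three cells - a phandle to a
 * syscon node, the register offset within it and the bit to write, e.g.
 * qcom,ipc = <&apcs 8 4> (the offset and bit shown here are hypothetical).
 */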
static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
{
	struct device_node *syscon;
	struct device *dev = smp2p->dev;
	const char *key;
	int ret;

	syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
	if (!syscon) {
		dev_err(dev, "no qcom,ipc node\n");
		return -ENODEV;
	}

	smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
	of_node_put(syscon);
	if (IS_ERR(smp2p->ipc_regmap))
		return PTR_ERR(smp2p->ipc_regmap);

	key = "qcom,ipc";
	ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
	if (ret < 0) {
		dev_err(dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
	if (ret < 0) {
		dev_err(dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}

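/*
 * Probe: read the edge description from the devicetree, obtain the outgoing
 * doorbell (mailbox channel or legacy "qcom,ipc" syscon), allocate and
 * initialize the outbound SMEM item, create one inbound irq_domain or
 * outbound smem_state per child node, then request the incoming interrupt
 * and wire it up as an (initially disabled) wakeup source.
 */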
static int qcom_smp2p_probe(struct platform_device *pdev)
{
	struct smp2p_entry *entry;
	struct device_node *node;
	struct qcom_smp2p *smp2p;
	const char *key;
	int irq;
	int ret;

	smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
	if (!smp2p)
		return -ENOMEM;

	smp2p->dev = &pdev->dev;
	INIT_LIST_HEAD(&smp2p->inbound);
	INIT_LIST_HEAD(&smp2p->outbound);

	platform_set_drvdata(pdev, smp2p);

	key = "qcom,smem";
	ret = of_property_read_u32_array(pdev->dev.of_node, key,
					 smp2p->smem_items, 2);
	if (ret)
		return ret;

	key = "qcom,local-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
	if (ret)
		goto report_read_failure;

	key = "qcom,remote-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
	if (ret)
		goto report_read_failure;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	smp2p->mbox_client.dev = &pdev->dev;
	smp2p->mbox_client.knows_txdone = true;
	smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
	if (IS_ERR(smp2p->mbox_chan)) {
		if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
			return PTR_ERR(smp2p->mbox_chan);

		smp2p->mbox_chan = NULL;

		ret = smp2p_parse_ipc(smp2p);
		if (ret)
			return ret;
	}

	ret = qcom_smp2p_alloc_outbound_item(smp2p);
	if (ret < 0)
		goto release_mbox;

	for_each_available_child_of_node(pdev->dev.of_node, node) {
		entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			ret = -ENOMEM;
			of_node_put(node);
			goto unwind_interfaces;
		}

		entry->smp2p = smp2p;
		spin_lock_init(&entry->lock);

		ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
		if (ret < 0) {
			of_node_put(node);
			goto unwind_interfaces;
		}

		if (of_property_read_bool(node, "interrupt-controller")) {
			ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
			if (ret < 0) {
				of_node_put(node);
				goto unwind_interfaces;
			}

			list_add(&entry->node, &smp2p->inbound);
		} else {
			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
			if (ret < 0) {
				of_node_put(node);
				goto unwind_interfaces;
			}

			list_add(&entry->node, &smp2p->outbound);
		}
	}

	/* Kick the outgoing edge after allocating entries */
	qcom_smp2p_kick(smp2p);

	ret = devm_request_threaded_irq(&pdev->dev, irq,
					NULL, qcom_smp2p_intr,
					IRQF_ONESHOT,
					"smp2p", (void *)smp2p);
	if (ret) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto unwind_interfaces;
	}

	/*
	 * Treat the smp2p interrupt as a wakeup source, but keep it disabled
	 * by default. User space can decide whether to enable it depending on
	 * the use case. For example, if the remoteproc crashes and the device
	 * must handle it immediately (e.g. to not miss phone calls), the
	 * wakeup source can be enabled from user space, while devices that
	 * lack a proper autosleep feature may prefer to handle it together
	 * with other wakeup events (e.g. the power button) instead of waking
	 * up immediately.
	 */
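	/*
	 * For instance (path abbreviated, illustrative only), user space can
	 * opt in with:
	 *   echo enabled > /sys/devices/platform/.../power/wakeup
	 */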
	device_set_wakeup_capable(&pdev->dev, true);

	ret = dev_pm_set_wake_irq(&pdev->dev, irq);
	if (ret)
		goto set_wake_irq_fail;

	return 0;

set_wake_irq_fail:
	dev_pm_clear_wake_irq(&pdev->dev);

unwind_interfaces:
	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	smp2p->out->valid_entries = 0;

release_mbox:
	mbox_free_channel(smp2p->mbox_chan);

	return ret;

report_read_failure:
	dev_err(&pdev->dev, "failed to read %s\n", key);
	return -EINVAL;
}

static int qcom_smp2p_remove(struct platform_device *pdev)
{
	struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
	struct smp2p_entry *entry;

	dev_pm_clear_wake_irq(&pdev->dev);

	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	mbox_free_channel(smp2p->mbox_chan);

	smp2p->out->valid_entries = 0;

	return 0;
}

static const struct of_device_id qcom_smp2p_of_match[] = {
	{ .compatible = "qcom,smp2p" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);

static struct platform_driver qcom_smp2p_driver = {
	.probe = qcom_smp2p_probe,
	.remove = qcom_smp2p_remove,
	.driver  = {
		.name  = "qcom_smp2p",
		.of_match_table = qcom_smp2p_of_match,
	},
};
module_platform_driver(qcom_smp2p_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
MODULE_LICENSE("GPL v2");