// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI K3 DSP Remote Processor(s) driver
 *
 * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
 *	Suman Anna <s-anna@ti.com>
 */

#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"

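/* DSP-view device addresses all lie below 16 MB, distinguishing them from SoC bus addresses */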
#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK	(SZ_16M - 1)

/**
 * struct k3_dsp_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address of the memory region from DSP view
 * @size: Size of the memory region
 */
struct k3_dsp_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};

/**
 * struct k3_dsp_mem_data - memory definitions for a DSP
 * @name: name for this memory entry
 * @dev_addr: device address for the memory entry
 */
struct k3_dsp_mem_data {
	const char *name;
	const u32 dev_addr;
};

/**
 * struct k3_dsp_dev_data - device data structure for a DSP
 * @mems: pointer to memory definitions for a DSP
 * @num_mems: number of memory regions in @mems
 * @boot_align_addr: boot vector address alignment granularity
 * @uses_lreset: flag to denote the need for local reset management
 */
struct k3_dsp_dev_data {
	const struct k3_dsp_mem_data *mems;
	u32 num_mems;
	u32 boot_align_addr;
	bool uses_lreset;
};

/**
 * struct k3_dsp_rproc - k3 DSP remote processor driver structure
 * @dev: cached device pointer
 * @rproc: remoteproc device handle
 * @mem: internal memory regions data
 * @num_mems: number of internal memory regions
 * @rmem: reserved memory regions data
 * @num_rmems: number of reserved memory regions
 * @reset: reset control handle
 * @data: pointer to DSP-specific device data
 * @tsp: TI-SCI processor control handle
 * @ti_sci: TI-SCI handle
 * @ti_sci_id: TI-SCI device identifier
 * @mbox: mailbox channel handle
 * @client: mailbox client to request the mailbox channel
 */
struct k3_dsp_rproc {
	struct device *dev;
	struct rproc *rproc;
	struct k3_dsp_mem *mem;
	int num_mems;
	struct k3_dsp_mem *rmem;
	int num_rmems;
	struct reset_control *reset;
	const struct k3_dsp_dev_data *data;
	struct ti_sci_proc *tsp;
	const struct ti_sci_handle *ti_sci;
	u32 ti_sci_id;
	struct mbox_chan *mbox;
	struct mbox_client client;
};

/**
 * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
 * @client: mailbox client pointer used for requesting the mailbox channel
 * @data: mailbox payload
 *
 * This handler is invoked by the OMAP mailbox driver whenever a mailbox
 * message is received. Usually, the mailbox payload simply contains
 * the index of the virtqueue that is kicked by the remote processor,
 * and we let remoteproc core handle it.
 *
 * In addition to virtqueue indices, we also have some out-of-band values
 * that indicate different events. Those values are deliberately very
 * large so they don't coincide with virtqueue indices.
 */
static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
{
	struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
						  client);
	struct device *dev = kproc->rproc->dev.parent;
	const char *name = kproc->rproc->name;
	u32 msg = omap_mbox_message(data);

	dev_dbg(dev, "mbox msg: 0x%x\n", msg);

	switch (msg) {
	case RP_MBOX_CRASH:
		/*
		 * remoteproc detected an exception, but error recovery is not
		 * supported. So, just log this for now
		 */
		dev_err(dev, "K3 DSP rproc %s crashed\n", name);
		break;
	case RP_MBOX_ECHO_REPLY:
		dev_info(dev, "received echo reply from %s\n", name);
		break;
	default:
		/* silently handle all other valid messages */
		if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
			return;
		if (msg > kproc->rproc->max_notifyid) {
			dev_dbg(dev, "dropping unknown message 0x%x\n", msg);
			return;
		}
		/* msg contains the index of the triggered vring */
		if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
			dev_dbg(dev, "no message was found in vqid %d\n", msg);
	}
}

/*
 * Kick the remote processor to notify about pending unprocessed messages.
 * The vqid is not used and is inconsequential, as the kick is performed
 * through a simulated GPIO (a bit in an IPC interrupt-triggering register);
 * the remote processor is expected to process both its Tx and Rx virtqueues.
 */
static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct device *dev = rproc->dev.parent;
	mbox_msg_t msg = (mbox_msg_t)vqid;
	int ret;

	/* send the index of the triggered virtqueue in the mailbox payload */
	ret = mbox_send_message(kproc->mbox, (void *)msg);
	if (ret < 0)
		dev_err(dev, "failed to send mailbox message, status = %d\n",
			ret);
}

/* Put the DSP processor into reset */
static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
{
	struct device *dev = kproc->dev;
	int ret;

	ret = reset_control_assert(kproc->reset);
	if (ret) {
		dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
		return ret;
	}

	if (kproc->data->uses_lreset)
		return ret;

	ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
		if (reset_control_deassert(kproc->reset))
			dev_warn(dev, "local-reset deassert back failed\n");
	}

	return ret;
}

/* Release the DSP processor from reset */
static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
{
	struct device *dev = kproc->dev;
	int ret;

	if (kproc->data->uses_lreset)
		goto lreset;

	ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "module-reset deassert failed, ret = %d\n", ret);
		return ret;
	}

lreset:
	ret = reset_control_deassert(kproc->reset);
	if (ret) {
		dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
		if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
							  kproc->ti_sci_id))
			dev_warn(dev, "module-reset assert back failed\n");
	}

	return ret;
}

/*
 * The C66x DSP cores have a local reset that affects only the CPU, and a
 * generic module reset that powers on the device and allows the DSP internal
 * memories to be accessed while the local reset is asserted. This function is
 * used to release the global reset on C66x DSPs to allow loading into the DSP
 * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
 * firmware loading, and is followed by the .start() ops after loading to
 * actually let the C66x DSP cores run.
 */
static int k3_dsp_rproc_prepare(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret)
		dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n",
			ret);

	return ret;
}

/*
 * This function implements the .unprepare() ops and performs the complementary
 * operations to that of the .prepare() ops. The function is used to assert the
 * global reset on applicable C66x cores. This completes the second portion of
 * powering down the C66x DSP cores. The cores themselves are only halted in the
 * .stop() callback through the local reset, and the .unprepare() ops is invoked
 * by the remoteproc core after the remoteproc is stopped to balance the global
 * reset.
 */
static int k3_dsp_rproc_unprepare(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret)
		dev_err(dev, "module-reset assert failed, ret = %d\n", ret);

	return ret;
}

/*
 * Power up the DSP remote processor.
 *
 * This function will be invoked only after the firmware for this rproc
 * was loaded, parsed successfully, and all of its resource requirements
 * were met.
 */
static int k3_dsp_rproc_start(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct mbox_client *client = &kproc->client;
	struct device *dev = kproc->dev;
	u32 boot_addr;
	int ret;

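	/* configure the mailbox client and acquire the IPC channel used for kicks */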
	client->dev = dev;
	client->tx_done = NULL;
	client->rx_callback = k3_dsp_rproc_mbox_callback;
	client->tx_block = false;
	client->knows_txdone = false;

	kproc->mbox = mbox_request_channel(client, 0);
	if (IS_ERR(kproc->mbox)) {
		ret = -EBUSY;
		dev_err(dev, "mbox_request_channel failed: %ld\n",
			PTR_ERR(kproc->mbox));
		return ret;
	}

	/*
	 * Ping the remote processor; this is only done for sanity's sake for now;
	 * there is no functional effect whatsoever.
	 *
	 * Note that the reply will _not_ arrive immediately: this message
	 * will wait in the mailbox fifo until the remote processor is booted.
	 */
	ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
	if (ret < 0) {
		dev_err(dev, "mbox_send_message failed: %d\n", ret);
		goto put_mbox;
	}

	boot_addr = rproc->bootaddr;
	if (boot_addr & (kproc->data->boot_align_addr - 1)) {
		dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
			boot_addr, kproc->data->boot_align_addr);
		ret = -EINVAL;
		goto put_mbox;
	}

	dev_dbg(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
	ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
	if (ret)
		goto put_mbox;

	ret = k3_dsp_rproc_release(kproc);
	if (ret)
		goto put_mbox;

	return 0;

put_mbox:
	mbox_free_channel(kproc->mbox);
	return ret;
}

/*
 * Stop the DSP remote processor.
 *
 * This function puts the DSP processor into reset, and finishes processing
 * of any pending messages.
 */
static int k3_dsp_rproc_stop(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;

	mbox_free_channel(kproc->mbox);

	k3_dsp_rproc_reset(kproc);

	return 0;
}

/*
 * Custom function to translate a DSP device address (internal RAMs only) to a
 * kernel virtual address.  The DSPs can access their RAMs at either an internal
 * address visible only from a DSP, or at the SoC-level bus address. Both these
 * addresses need to be looked through for translation. The translated addresses
 * can be used either by the remoteproc core for loading (when using kernel
 * remoteproc loader), or by any rpmsg bus drivers.
 */
static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	void __iomem *va = NULL;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	size_t size;
	int i;

	if (len == 0)
		return NULL;

	for (i = 0; i < kproc->num_mems; i++) {
		bus_addr = kproc->mem[i].bus_addr;
		dev_addr = kproc->mem[i].dev_addr;
		size = kproc->mem[i].size;

		if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
			/* handle DSP-view addresses */
			if (da >= dev_addr &&
			    ((da + len) <= (dev_addr + size))) {
				offset = da - dev_addr;
				va = kproc->mem[i].cpu_addr + offset;
				return (__force void *)va;
			}
		} else {
			/* handle SoC-view addresses */
			if (da >= bus_addr &&
			    (da + len) <= (bus_addr + size)) {
				offset = da - bus_addr;
				va = kproc->mem[i].cpu_addr + offset;
				return (__force void *)va;
			}
		}
	}

	/* handle static DDR reserved memory regions */
	for (i = 0; i < kproc->num_rmems; i++) {
		dev_addr = kproc->rmem[i].dev_addr;
		size = kproc->rmem[i].size;

		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->rmem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	return NULL;
}

static const struct rproc_ops k3_dsp_rproc_ops = {
	.start		= k3_dsp_rproc_start,
	.stop		= k3_dsp_rproc_stop,
	.kick		= k3_dsp_rproc_kick,
	.da_to_va	= k3_dsp_rproc_da_to_va,
};

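/*
 * Map the SoC-specific DSP internal RAM regions listed in the device data
 * so the remoteproc core can load firmware segments directly into them.
 */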
static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
					struct k3_dsp_rproc *kproc)
{
	const struct k3_dsp_dev_data *data = kproc->data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int num_mems = 0;
	int i;

	num_mems = kproc->data->num_mems;
	kproc->mem = devm_kcalloc(kproc->dev, num_mems,
				  sizeof(*kproc->mem), GFP_KERNEL);
	if (!kproc->mem)
		return -ENOMEM;

	for (i = 0; i < num_mems; i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   data->mems[i].name);
		if (!res) {
			dev_err(dev, "found no memory resource for %s\n",
				data->mems[i].name);
			return -EINVAL;
		}
		if (!devm_request_mem_region(dev, res->start,
					     resource_size(res),
					     dev_name(dev))) {
			dev_err(dev, "could not request %s region for resource\n",
				data->mems[i].name);
			return -EBUSY;
		}

		kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
							 resource_size(res));
		if (!kproc->mem[i].cpu_addr) {
			dev_err(dev, "failed to map %s memory\n",
				data->mems[i].name);
			return -ENOMEM;
		}
		kproc->mem[i].bus_addr = res->start;
		kproc->mem[i].dev_addr = data->mems[i].dev_addr;
		kproc->mem[i].size = resource_size(res);

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			data->mems[i].name, &kproc->mem[i].bus_addr,
			kproc->mem[i].size, kproc->mem[i].cpu_addr,
			kproc->mem[i].dev_addr);
	}
	kproc->num_mems = num_mems;

	return 0;
}

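/*
 * Parse the "memory-region" phandles: the first region provides the DMA
 * pool for virtio rings and buffers, and the remaining regions are mapped
 * as static carveouts usable for firmware segments.
 */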
static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
{
	struct device *dev = kproc->dev;
	struct device_node *np = dev->of_node;
	struct device_node *rmem_np;
	struct reserved_mem *rmem;
	int num_rmems;
	int ret, i;

	num_rmems = of_property_count_elems_of_size(np, "memory-region",
						    sizeof(phandle));
	if (num_rmems <= 0) {
		dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
			num_rmems);
		return -EINVAL;
	}
	if (num_rmems < 2) {
		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
			num_rmems);
		return -EINVAL;
	}

	/* use reserved memory region 0 for vring DMA allocations */
	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
	if (ret) {
		dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
			ret);
		return ret;
	}

	num_rmems--;
	kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
	if (!kproc->rmem) {
		ret = -ENOMEM;
		goto release_rmem;
	}

	/* use remaining reserved memory regions for static carveouts */
	for (i = 0; i < num_rmems; i++) {
		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
		if (!rmem_np) {
			ret = -EINVAL;
			goto unmap_rmem;
		}

		rmem = of_reserved_mem_lookup(rmem_np);
		if (!rmem) {
			of_node_put(rmem_np);
			ret = -EINVAL;
			goto unmap_rmem;
		}
		of_node_put(rmem_np);

		kproc->rmem[i].bus_addr = rmem->base;
		/* 64-bit address regions currently not supported */
		kproc->rmem[i].dev_addr = (u32)rmem->base;
		kproc->rmem[i].size = rmem->size;
		kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
		if (!kproc->rmem[i].cpu_addr) {
			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
				i + 1, &rmem->base, &rmem->size);
			ret = -ENOMEM;
			goto unmap_rmem;
		}

		dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			i + 1, &kproc->rmem[i].bus_addr,
			kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
			kproc->rmem[i].dev_addr);
	}
	kproc->num_rmems = num_rmems;

	return 0;

unmap_rmem:
	for (i--; i >= 0; i--)
		iounmap(kproc->rmem[i].cpu_addr);
	kfree(kproc->rmem);
release_rmem:
	of_reserved_mem_device_release(kproc->dev);
	return ret;
}

static void k3_dsp_reserved_mem_exit(struct k3_dsp_rproc *kproc)
{
	int i;

	for (i = 0; i < kproc->num_rmems; i++)
		iounmap(kproc->rmem[i].cpu_addr);
	kfree(kproc->rmem);

	of_reserved_mem_device_release(kproc->dev);
}

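/*
 * Build the TI-SCI processor control handle from the "ti,sci-proc-ids"
 * property, which supplies the processor id and host id for this DSP core.
 */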
static
struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev,
					    const struct ti_sci_handle *sci)
{
	struct ti_sci_proc *tsp;
	u32 temp[2];
	int ret;

	ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids",
					 temp, 2);
	if (ret < 0)
		return ERR_PTR(ret);

	tsp = kzalloc(sizeof(*tsp), GFP_KERNEL);
	if (!tsp)
		return ERR_PTR(-ENOMEM);

	tsp->dev = dev;
	tsp->sci = sci;
	tsp->ops = &sci->ops.proc_ops;
	tsp->proc_id = temp[0];
	tsp->host_id = temp[1];

	return tsp;
}

static int k3_dsp_rproc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct k3_dsp_dev_data *data;
	struct k3_dsp_rproc *kproc;
	struct rproc *rproc;
	const char *fw_name;
	int ret = 0;
	int ret1;

	data = of_device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	ret = rproc_of_parse_firmware(dev, 0, &fw_name);
	if (ret) {
		dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
			ret);
		return ret;
	}

	rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name,
			    sizeof(*kproc));
	if (!rproc)
		return -ENOMEM;

	rproc->has_iommu = false;
	rproc->recovery_disabled = true;
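	/*
	 * Only cores using a local reset (C66x) need the prepare/unprepare
	 * ops, to manage the module reset around loading into internal RAMs.
	 */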
	if (data->uses_lreset) {
		rproc->ops->prepare = k3_dsp_rproc_prepare;
		rproc->ops->unprepare = k3_dsp_rproc_unprepare;
	}
	kproc = rproc->priv;
	kproc->rproc = rproc;
	kproc->dev = dev;
	kproc->data = data;

	kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
	if (IS_ERR(kproc->ti_sci)) {
		ret = PTR_ERR(kproc->ti_sci);
		if (ret != -EPROBE_DEFER) {
			dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
				ret);
		}
		kproc->ti_sci = NULL;
		goto free_rproc;
	}

	ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "missing 'ti,sci-dev-id' property\n");
		goto put_sci;
	}

	kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(kproc->reset)) {
		ret = PTR_ERR(kproc->reset);
		dev_err(dev, "failed to get reset, status = %d\n", ret);
		goto put_sci;
	}

	kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
	if (IS_ERR(kproc->tsp)) {
		ret = PTR_ERR(kproc->tsp);
		dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
			ret);
		goto put_sci;
	}

	ret = ti_sci_proc_request(kproc->tsp);
	if (ret < 0) {
		dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
		goto free_tsp;
	}

	ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
	if (ret)
		goto release_tsp;

	ret = k3_dsp_reserved_mem_init(kproc);
	if (ret) {
		dev_err(dev, "reserved memory init failed, ret = %d\n", ret);
		goto release_tsp;
	}

	/*
	 * Ensure the DSP local reset is asserted so the DSP doesn't execute
	 * bogus code when the module reset is released in .prepare().
	 */
	if (data->uses_lreset) {
		ret = reset_control_status(kproc->reset);
		if (ret < 0) {
			dev_err(dev, "failed to get reset status, status = %d\n",
				ret);
			goto release_mem;
		} else if (ret == 0) {
			dev_warn(dev, "local reset is deasserted for device\n");
			k3_dsp_rproc_reset(kproc);
		}
	}

	ret = rproc_add(rproc);
	if (ret) {
		dev_err(dev, "failed to register device with remoteproc core, status = %d\n",
			ret);
		goto release_mem;
	}

	platform_set_drvdata(pdev, kproc);

	return 0;

release_mem:
	k3_dsp_reserved_mem_exit(kproc);
release_tsp:
	ret1 = ti_sci_proc_release(kproc->tsp);
	if (ret1)
		dev_err(dev, "failed to release proc, ret = %d\n", ret1);
free_tsp:
	kfree(kproc->tsp);
put_sci:
	ret1 = ti_sci_put_handle(kproc->ti_sci);
	if (ret1)
		dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret1);
free_rproc:
	rproc_free(rproc);
	return ret;
}

static int k3_dsp_rproc_remove(struct platform_device *pdev)
{
	struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	int ret;

	rproc_del(kproc->rproc);

	ret = ti_sci_proc_release(kproc->tsp);
	if (ret)
		dev_err(dev, "failed to release proc, ret = %d\n", ret);

	kfree(kproc->tsp);

	ret = ti_sci_put_handle(kproc->ti_sci);
	if (ret)
		dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret);

	k3_dsp_reserved_mem_exit(kproc);
	rproc_free(kproc->rproc);

	return 0;
}

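/* C66x cores expose an L2 SRAM plus separate L1P and L1D SRAMs for loading */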
static const struct k3_dsp_mem_data c66_mems[] = {
	{ .name = "l2sram", .dev_addr = 0x800000 },
	{ .name = "l1pram", .dev_addr = 0xe00000 },
	{ .name = "l1dram", .dev_addr = 0xf00000 },
};

/* C71x cores only have an L1P cache; there are no L1P SRAMs */
static const struct k3_dsp_mem_data c71_mems[] = {
	{ .name = "l2sram", .dev_addr = 0x800000 },
	{ .name = "l1dram", .dev_addr = 0xe00000 },
};

static const struct k3_dsp_dev_data c66_data = {
	.mems = c66_mems,
	.num_mems = ARRAY_SIZE(c66_mems),
	.boot_align_addr = SZ_1K,
	.uses_lreset = true,
};

static const struct k3_dsp_dev_data c71_data = {
	.mems = c71_mems,
	.num_mems = ARRAY_SIZE(c71_mems),
	.boot_align_addr = SZ_2M,
	.uses_lreset = false,
};

static const struct of_device_id k3_dsp_of_match[] = {
	{ .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
	{ .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_dsp_of_match);

static struct platform_driver k3_dsp_rproc_driver = {
	.probe	= k3_dsp_rproc_probe,
	.remove	= k3_dsp_rproc_remove,
	.driver	= {
		.name = "k3-dsp-rproc",
		.of_match_table = k3_dsp_of_match,
	},
};

module_platform_driver(k3_dsp_rproc_driver);

MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI K3 DSP Remoteproc driver");