// SPDX-License-Identifier: GPL-2.0-only
/*
 * Remote Processor Framework
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 * Mark Grosen <mgrosen@ti.com>
 * Fernando Guzman Lugo <fernando.lugo@ti.com>
 * Suman Anna <s-anna@ti.com>
 * Robert Tivy <rtivy@ti.com>
 * Armando Uribe De Leon <x0095078@ti.com>
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/devcoredump.h>
#include <linux/rculist.h>
#include <linux/remoteproc.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/elf.h>
#include <linux/crc32.h>
#include <linux/of_reserved_mem.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <asm/byteorder.h>
#include <linux/platform_device.h>

#include "remoteproc_internal.h"
#include "remoteproc_elf_helpers.h"

#define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL

static DEFINE_MUTEX(rproc_list_mutex);
static LIST_HEAD(rproc_list);
static struct notifier_block rproc_panic_nb;

typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
				 void *, int offset, int avail);

static int rproc_alloc_carveout(struct rproc *rproc,
				struct rproc_mem_entry *mem);
static int rproc_release_carveout(struct rproc *rproc,
				  struct rproc_mem_entry *mem);

/* Unique indices for remoteproc devices */
static DEFINE_IDA(rproc_dev_index);

static const char * const rproc_crash_names[] = {
	[RPROC_MMUFAULT]	= "mmufault",
	[RPROC_WATCHDOG]	= "watchdog",
	[RPROC_FATAL_ERROR]	= "fatal error",
};

/* translate rproc_crash_type to string */
static const char *rproc_crash_to_string(enum rproc_crash_type type)
{
	if (type < ARRAY_SIZE(rproc_crash_names))
		return rproc_crash_names[type];
	return "unknown";
}

/*
 * This is the IOMMU fault handler we register with the IOMMU API
 * (when relevant; not all remote processors access memory through
 * an IOMMU).
 *
 * The IOMMU core will invoke this handler whenever the remote
 * processor tries to access an unmapped device address.
 */
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
			     unsigned long iova, int flags, void *token)
{
	struct rproc *rproc = token;

	dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);

	rproc_report_crash(rproc, RPROC_MMUFAULT);

	/*
	 * Let the iommu core know we're not really handling this fault;
	 * we just used it as a recovery trigger.
	 */
	return -ENOSYS;
}

static int rproc_enable_iommu(struct rproc *rproc)
{
	struct iommu_domain *domain;
	struct device *dev = rproc->dev.parent;
	int ret;

	if (!rproc->has_iommu) {
		dev_dbg(dev, "iommu not present\n");
		return 0;
	}

	domain = iommu_domain_alloc(dev->bus);
	if (!domain) {
		dev_err(dev, "can't alloc iommu domain\n");
		return -ENOMEM;
	}

	iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);

	ret = iommu_attach_device(domain, dev);
	if (ret) {
		dev_err(dev, "can't attach iommu device: %d\n", ret);
		goto free_domain;
	}

	rproc->domain = domain;

	return 0;

free_domain:
	iommu_domain_free(domain);
	return ret;
}

static void rproc_disable_iommu(struct rproc *rproc)
{
	struct iommu_domain *domain = rproc->domain;
	struct device *dev = rproc->dev.parent;

	if (!domain)
		return;

	iommu_detach_device(domain, dev);
	iommu_domain_free(domain);
}

phys_addr_t rproc_va_to_pa(void *cpu_addr)
{
	/*
	 * Return the physical address according to the virtual address
	 * location:
	 * - in vmalloc: if the region was ioremapped or allocated with
	 *   dma_alloc_coherent()
	 * - in the kernel direct map: if the region was allocated from the
	 *   generic DMA memory pool
	 */
	if (is_vmalloc_addr(cpu_addr)) {
		return page_to_phys(vmalloc_to_page(cpu_addr)) +
				    offset_in_page(cpu_addr);
	}

	WARN_ON(!virt_addr_valid(cpu_addr));
	return virt_to_phys(cpu_addr);
}
EXPORT_SYMBOL(rproc_va_to_pa);

/**
 * rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
 * @rproc: handle of a remote processor
 * @da: remoteproc device address to translate
 * @len: length of the memory region @da is pointing to
 *
 * Some remote processors will ask us to allocate them physically contiguous
 * memory regions (which we call "carveouts"), and map them to specific
 * device addresses (which are hardcoded in the firmware). They may also have
 * dedicated memory regions internal to the processors, and use them either
 * exclusively or alongside carveouts.
 *
 * They may then ask us to copy objects into specific device addresses (e.g.
 * code/data sections) or expose to us certain symbols at other device
 * addresses (e.g. their trace buffer).
 *
 * This function is a helper with which we can go over the allocated
 * carveouts and translate specific device addresses to kernel virtual
 * addresses so we can access the referenced memory. This function also
 * allows performing translations on the internal remoteproc memory regions
 * through a platform implementation specific da_to_va ops, if present.
 *
 * The function returns a valid kernel address on success or NULL on failure.
 *
 * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
 * but only on kernel direct mapped RAM memory. Instead, we're just using
 * here the output of the DMA API for the carveouts, which should be more
 * correct.
 */
void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
	struct rproc_mem_entry *carveout;
	void *ptr = NULL;

	if (rproc->ops->da_to_va) {
		ptr = rproc->ops->da_to_va(rproc, da, len);
		if (ptr)
			goto out;
	}

	list_for_each_entry(carveout, &rproc->carveouts, node) {
		int offset = da - carveout->da;

		/*  Verify that carveout is allocated */
		if (!carveout->va)
			continue;

		/* try next carveout if da is too small */
		if (offset < 0)
			continue;

		/* try next carveout if da is too large */
		if (offset + len > carveout->len)
			continue;

		ptr = carveout->va + offset;

		break;
	}

out:
	return ptr;
}
EXPORT_SYMBOL(rproc_da_to_va);

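
/*
 * Example (illustrative sketch only, not part of this file's logic): a
 * platform driver that knows the device address of a shared buffer could
 * translate and inspect it roughly like this. The names "my_rproc",
 * MY_LOG_DA and MY_LOG_LEN are hypothetical:
 *
 *	void *va = rproc_da_to_va(my_rproc, MY_LOG_DA, MY_LOG_LEN);
 *
 *	if (va)
 *		print_hex_dump_bytes("log: ", DUMP_PREFIX_OFFSET,
 *				     va, MY_LOG_LEN);
 */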
/**
 * rproc_find_carveout_by_name() - lookup the carveout region by a name
 * @rproc: handle of a remote processor
 * @name: carveout name to find (format string)
 * @...: optional parameters matching @name string
 *
 * The platform driver has the capability to register pre-allocated carveouts
 * (physically contiguous memory regions) before rproc firmware loading and
 * the associated resource table analysis. These regions may be dedicated
 * memory regions internal to the coprocessor, or specified DDR regions with
 * specific attributes.
 *
 * This function is a helper with which we can go over the allocated
 * carveouts and return associated region characteristics like the
 * coprocessor address, length or processor virtual address.
 *
 * Return: a valid pointer to the carveout entry on success or NULL on
 * failure.
 */
struct rproc_mem_entry *
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
{
	va_list args;
	char _name[32];
	struct rproc_mem_entry *carveout, *mem = NULL;

	if (!name)
		return NULL;

	va_start(args, name);
	vsnprintf(_name, sizeof(_name), name, args);
	va_end(args);

	list_for_each_entry(carveout, &rproc->carveouts, node) {
		/* Compare carveout and requested names */
		if (!strcmp(carveout->name, _name)) {
			mem = carveout;
			break;
		}
	}

	return mem;
}

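
/*
 * Example (illustrative sketch): looking up the vring carveout that
 * rproc_alloc_vring() registers below, here with made-up vdev and vring
 * indices:
 *
 *	struct rproc_mem_entry *mem;
 *
 *	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", 0, 1);
 *	if (mem)
 *		dev_dbg(&rproc->dev, "da 0x%x len 0x%zx\n",
 *			mem->da, mem->len);
 */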
/**
 * rproc_check_carveout_da() - check the specified carveout da configuration
 * @rproc: handle of a remote processor
 * @mem: pointer to the carveout to check
 * @da: area device address
 * @len: associated area size
 *
 * This function is a helper to verify that the requested device area
 * (the da, len pair) is part of the specified carveout.
 * If da is not set (defined as FW_RSC_ADDR_ANY), only the requested length
 * is checked.
 *
 * Return: 0 if the carveout matches the request, an error code otherwise
 */
static int rproc_check_carveout_da(struct rproc *rproc,
				   struct rproc_mem_entry *mem, u32 da, u32 len)
{
	struct device *dev = &rproc->dev;
	int delta;

	/* Check requested resource length */
	if (len > mem->len) {
		dev_err(dev, "Registered carveout doesn't fit len request\n");
		return -EINVAL;
	}

	if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) {
		/* Address doesn't match registered carveout configuration */
		return -EINVAL;
	} else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) {
		delta = da - mem->da;

		/* Check requested resource belongs to registered carveout */
		if (delta < 0) {
			dev_err(dev,
				"Registered carveout doesn't fit da request\n");
			return -EINVAL;
		}

		if (delta + len > mem->len) {
			dev_err(dev,
				"Registered carveout doesn't fit len request\n");
			return -EINVAL;
		}
	}

	return 0;
}

int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct rproc_vring *rvring = &rvdev->vring[i];
	struct fw_rsc_vdev *rsc;
	int ret, notifyid;
	struct rproc_mem_entry *mem;
	size_t size;

	/* actual size of vring (in bytes) */
	size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));

	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;

	/* Search for pre-registered carveout */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  i);
	if (mem) {
		if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size))
			return -ENOMEM;
	} else {
		/* Register carveout in list */
		mem = rproc_mem_entry_init(dev, NULL, 0,
					   size, rsc->vring[i].da,
					   rproc_alloc_carveout,
					   rproc_release_carveout,
					   "vdev%dvring%d",
					   rvdev->index, i);
		if (!mem) {
			dev_err(dev, "Can't allocate memory entry structure\n");
			return -ENOMEM;
		}

		rproc_add_carveout(rproc, mem);
	}

	/*
	 * Assign an rproc-wide unique index for this vring
	 * TODO: assign a notifyid for rvdev updates as well
	 * TODO: support predefined notifyids (via resource table)
	 */
	ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		dev_err(dev, "idr_alloc failed: %d\n", ret);
		return ret;
	}
	notifyid = ret;

	/* Potentially bump max_notifyid */
	if (notifyid > rproc->max_notifyid)
		rproc->max_notifyid = notifyid;

	rvring->notifyid = notifyid;

	/* Let the rproc know the notifyid of this vring. */
	rsc->vring[i].notifyid = notifyid;
	return 0;
}

static int
rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
	struct rproc_vring *rvring = &rvdev->vring[i];

	dev_dbg(dev, "vdev rsc: vring%d: da 0x%x, qsz %d, align %d\n",
		i, vring->da, vring->num, vring->align);

	/* verify queue size and vring alignment are sane */
	if (!vring->num || !vring->align) {
		dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
			vring->num, vring->align);
		return -EINVAL;
	}

	rvring->len = vring->num;
	rvring->align = vring->align;
	rvring->rvdev = rvdev;

	return 0;
}

void rproc_free_vring(struct rproc_vring *rvring)
{
	struct rproc *rproc = rvring->rvdev->rproc;
	int idx = rvring - rvring->rvdev->vring;
	struct fw_rsc_vdev *rsc;

	idr_remove(&rproc->notifyids, rvring->notifyid);

	/* reset resource entry info */
	rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
	rsc->vring[idx].da = 0;
	rsc->vring[idx].notifyid = -1;
}

static int rproc_vdev_do_start(struct rproc_subdev *subdev)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);

	return rproc_add_virtio_dev(rvdev, rvdev->id);
}

static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
	int ret;

	ret = device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev);
	if (ret)
		dev_warn(&rvdev->dev, "can't remove vdev child device: %d\n", ret);
}

/**
 * rproc_rvdev_release() - release an rvdev
 *
 * @dev: the subdevice's dev
 */
static void rproc_rvdev_release(struct device *dev)
{
	struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);

	of_reserved_mem_device_release(dev);

	kfree(rvdev);
}

/**
 * rproc_handle_vdev() - handle a vdev fw resource
 * @rproc: the remote processor
 * @rsc: the vring resource descriptor
 * @offset: offset of the resource entry
 * @avail: size of available data (for sanity checking the image)
 *
 * This resource entry requests the host to statically register a virtio
 * device (vdev), and setup everything needed to support it. It contains
 * everything needed to make it possible: the virtio device id, virtio
 * device features, vrings information, virtio config space, etc...
 *
 * Before registering the vdev, the vrings are allocated from non-cacheable
 * physically contiguous memory. Currently we only support two vrings per
 * remote processor (temporary limitation). We might also want to consider
 * doing the vring allocation only later when ->find_vqs() is invoked, and
 * then release them upon ->del_vqs().
 *
 * Note: @da is currently not really handled correctly: we dynamically
 * allocate it using the DMA API, ignoring requested hard coded addresses,
 * and we don't take care of any required IOMMU programming. This is all
 * going to be taken care of when the generic iommu-based DMA API will be
 * merged. Meanwhile, statically-addressed iommu-based firmware images should
 * use RSC_DEVMEM resource entries to map their required @da to the physical
 * address of their base CMA region (ouch, hacky!).
 *
 * Returns 0 on success, or an appropriate error code otherwise
 */
static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
			     int offset, int avail)
{
	struct device *dev = &rproc->dev;
	struct rproc_vdev *rvdev;
	int i, ret;
	char name[16];

	/* make sure resource isn't truncated */
	if (struct_size(rsc, vring, rsc->num_of_vrings) + rsc->config_len >
			avail) {
		dev_err(dev, "vdev rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved[0] || rsc->reserved[1]) {
		dev_err(dev, "vdev rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "vdev rsc: id %d, dfeatures 0x%x, cfg len %d, %d vrings\n",
		rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);

	/* we currently support only two vrings per rvdev */
	if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) {
		dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings);
		return -EINVAL;
	}

	rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	kref_init(&rvdev->refcount);

	rvdev->id = rsc->id;
	rvdev->rproc = rproc;
	rvdev->index = rproc->nb_vdev++;

	/* Initialise vdev subdevice */
	snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
	rvdev->dev.parent = rproc->dev.parent;
	rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset;
	rvdev->dev.release = rproc_rvdev_release;
	dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);
	dev_set_drvdata(&rvdev->dev, rvdev);

	ret = device_register(&rvdev->dev);
	if (ret) {
		put_device(&rvdev->dev);
		return ret;
	}
	/* Make device dma capable by inheriting from parent's capabilities */
	set_dma_ops(&rvdev->dev, get_dma_ops(rproc->dev.parent));

	ret = dma_coerce_mask_and_coherent(&rvdev->dev,
					   dma_get_mask(rproc->dev.parent));
	if (ret) {
		dev_warn(dev,
			 "Failed to set DMA mask %llx. Trying to continue... %x\n",
			 dma_get_mask(rproc->dev.parent), ret);
	}

	/* parse the vrings */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_parse_vring(rvdev, rsc, i);
		if (ret)
			goto free_rvdev;
	}

	/* remember the resource offset */
	rvdev->rsc_offset = offset;

	/* allocate the vring resources */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_alloc_vring(rvdev, i);
		if (ret)
			goto unwind_vring_allocations;
	}

	list_add_tail(&rvdev->node, &rproc->rvdevs);

	rvdev->subdev.start = rproc_vdev_do_start;
	rvdev->subdev.stop = rproc_vdev_do_stop;

	rproc_add_subdev(rproc, &rvdev->subdev);

	return 0;

unwind_vring_allocations:
	for (i--; i >= 0; i--)
		rproc_free_vring(&rvdev->vring[i]);
free_rvdev:
	device_unregister(&rvdev->dev);
	return ret;
}

void rproc_vdev_release(struct kref *ref)
{
	struct rproc_vdev *rvdev = container_of(ref, struct rproc_vdev, refcount);
	struct rproc_vring *rvring;
	struct rproc *rproc = rvdev->rproc;
	int id;

	for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
		rvring = &rvdev->vring[id];
		rproc_free_vring(rvring);
	}

	rproc_remove_subdev(rproc, &rvdev->subdev);
	list_del(&rvdev->node);
	device_unregister(&rvdev->dev);
}

/**
 * rproc_handle_trace() - handle a shared trace buffer resource
 * @rproc: the remote processor
 * @rsc: the trace resource descriptor
 * @offset: offset of the resource entry
 * @avail: size of available data (for sanity checking the image)
 *
 * In case the remote processor dumps trace logs into memory,
 * export it via debugfs.
 *
 * Currently, the 'da' member of @rsc should contain the device address
 * where the remote processor is dumping the traces. Later we could also
 * support dynamically allocating this address using the generic
 * DMA API (but currently there isn't a use case for that).
 *
 * Returns 0 on success, or an appropriate error code otherwise
 */
static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
			      int offset, int avail)
{
	struct rproc_debug_trace *trace;
	struct device *dev = &rproc->dev;
	char name[15];

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "trace rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "trace rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	trace = kzalloc(sizeof(*trace), GFP_KERNEL);
	if (!trace)
		return -ENOMEM;

	/* set the trace buffer dma properties */
	trace->trace_mem.len = rsc->len;
	trace->trace_mem.da = rsc->da;

	/* set pointer to the rproc device */
	trace->rproc = rproc;

	/* make sure snprintf always null terminates, even if truncating */
	snprintf(name, sizeof(name), "trace%d", rproc->num_traces);

	/* create the debugfs entry */
	trace->tfile = rproc_create_trace_file(name, rproc, trace);
	if (!trace->tfile) {
		kfree(trace);
		return -EINVAL;
	}

	list_add_tail(&trace->node, &rproc->traces);

	rproc->num_traces++;

	dev_dbg(dev, "%s added: da 0x%x, len 0x%x\n",
		name, rsc->da, rsc->len);

	return 0;
}

/**
 * rproc_handle_devmem() - handle devmem resource entry
 * @rproc: remote processor handle
 * @rsc: the devmem resource entry
 * @offset: offset of the resource entry
 * @avail: size of available data (for sanity checking the image)
 *
 * Remote processors commonly need to access certain on-chip peripherals.
 *
 * Some of these remote processors access memory via an iommu device,
 * and might require us to configure their iommu before they can access
 * the on-chip peripherals they need.
 *
 * This resource entry is a request to map such a peripheral device.
 *
 * These devmem entries will contain the physical address of the device in
 * the 'pa' member. If a specific device address is expected, then 'da' will
 * contain it (currently this is the only use case supported). 'len' will
 * contain the size of the physical region we need to map.
 *
 * Currently we just "trust" those devmem entries to contain valid physical
 * addresses, but this is going to change: we want the implementations to
 * tell us ranges of physical addresses the firmware is allowed to request,
 * and not allow firmwares to request access to physical addresses that
 * are outside those ranges.
 */
static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
			       int offset, int avail)
{
	struct rproc_mem_entry *mapping;
	struct device *dev = &rproc->dev;
	int ret;

	/* no point in handling this resource without a valid iommu domain */
	if (!rproc->domain)
		return -EINVAL;

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "devmem rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "devmem rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
	if (ret) {
		dev_err(dev, "failed to map devmem: %d\n", ret);
		goto out;
	}

	/*
	 * We'll need this info later when we'll want to unmap everything
	 * (e.g. on shutdown).
	 *
	 * We can't trust the remote processor not to change the resource
	 * table, so we must maintain this info independently.
	 */
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	list_add_tail(&mapping->node, &rproc->mappings);

	dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
		rsc->pa, rsc->da, rsc->len);

	return 0;

out:
	kfree(mapping);
	return ret;
}

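
/*
 * Example (illustrative only): a devmem entry as it might appear in a
 * firmware resource table, mapping a hypothetical 4K peripheral at
 * physical address 0x4a000000 to device address 0x60000000. All values
 * are made up for illustration:
 *
 *	struct fw_rsc_devmem rsc = {
 *		.da = 0x60000000,
 *		.pa = 0x4a000000,
 *		.len = SZ_4K,
 *		.flags = 0,
 *		.name = "periph",
 *	};
 */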
/**
 * rproc_alloc_carveout() - allocate the specified carveout
 * @rproc: rproc handle
 * @mem: the memory entry to allocate
 *
 * This function allocates the specified memory entry @mem, using
 * dma_alloc_coherent() as the default allocator.
 */
static int rproc_alloc_carveout(struct rproc *rproc,
				struct rproc_mem_entry *mem)
{
	struct rproc_mem_entry *mapping = NULL;
	struct device *dev = &rproc->dev;
	dma_addr_t dma;
	void *va;
	int ret;

	va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev->parent,
			"failed to allocate dma memory: len 0x%zx\n",
			mem->len);
		return -ENOMEM;
	}

	dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n",
		va, &dma, mem->len);

	if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
		/*
		 * Check that the requested da equals the dma address, and
		 * print a warning message in case of misalignment. Don't
		 * stop the rproc_start sequence, as the coprocessor may
		 * build a pa to da translation on its side.
		 */
		if (mem->da != (u32)dma)
			dev_warn(dev->parent,
				 "Allocated carveout doesn't fit device address request\n");
	}

	/*
	 * Ok, this is non-standard.
	 *
	 * Sometimes we can't rely on the generic iommu-based DMA API
	 * to dynamically allocate the device address and then set the IOMMU
	 * tables accordingly, because some remote processors might
	 * _require_ us to use hard coded device addresses that their
	 * firmware was compiled with.
	 *
	 * In this case, we must use the IOMMU API directly and map
	 * the memory to the device address as expected by the remote
	 * processor.
	 *
	 * Obviously such remote processor devices should not be configured
	 * to use the iommu-based DMA API: we expect 'dma' to contain the
	 * physical address in this case.
	 */
	if (mem->da != FW_RSC_ADDR_ANY && rproc->domain) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto dma_free;
		}

		ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
				mem->flags);
		if (ret) {
			dev_err(dev, "iommu_map failed: %d\n", ret);
			goto free_mapping;
		}

		/*
		 * We'll need this info later when we'll want to unmap
		 * everything (e.g. on shutdown).
		 *
		 * We can't trust the remote processor not to change the
		 * resource table, so we must maintain this info independently.
		 */
		mapping->da = mem->da;
		mapping->len = mem->len;
		list_add_tail(&mapping->node, &rproc->mappings);

		dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
			mem->da, &dma);
	}

	if (mem->da == FW_RSC_ADDR_ANY) {
		/* Update the device address left undefined by the requester */
		if ((u64)dma & HIGH_BITS_MASK)
			dev_warn(dev, "DMA address cast in 32bit to fit resource table format\n");

		mem->da = (u32)dma;
	}

	mem->dma = dma;
	mem->va = va;

	return 0;

free_mapping:
	kfree(mapping);
dma_free:
	dma_free_coherent(dev->parent, mem->len, va, dma);
	return ret;
}

/**
 * rproc_release_carveout() - release acquired carveout
 * @rproc: rproc handle
 * @mem: the memory entry to release
 *
 * This function releases specified memory entry @mem allocated via
 * rproc_alloc_carveout() function by @rproc.
 */
static int rproc_release_carveout(struct rproc *rproc,
				  struct rproc_mem_entry *mem)
{
	struct device *dev = &rproc->dev;

	/* clean up carveout allocations */
	dma_free_coherent(dev->parent, mem->len, mem->va, mem->dma);
	return 0;
}

/**
 * rproc_handle_carveout() - handle phys contig memory allocation requests
 * @rproc: rproc handle
 * @rsc: the resource entry
 * @offset: offset of the resource entry
 * @avail: size of available data (for image validation)
 *
 * This function will handle firmware requests for allocation of physically
 * contiguous memory regions.
 *
 * These request entries should come first in the firmware's resource table,
 * as other firmware entries might request placing other data objects inside
 * these memory regions (e.g. data/code segments, trace resource entries, ...).
 *
 * Allocating memory this way helps utilizing the reserved physical memory
 * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
 * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
 * pressure is important; it may have a substantial impact on performance.
 */
static int rproc_handle_carveout(struct rproc *rproc,
				 struct fw_rsc_carveout *rsc,
				 int offset, int avail)
{
	struct rproc_mem_entry *carveout;
	struct device *dev = &rproc->dev;

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "carveout rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "carveout rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n",
		rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);

	/*
	 * Check if the carveout rsc is already part of a registered
	 * carveout: search by name, then check the da and length.
	 */
	carveout = rproc_find_carveout_by_name(rproc, rsc->name);

	if (carveout) {
		if (carveout->rsc_offset != FW_RSC_ADDR_ANY) {
			dev_err(dev,
				"Carveout already associated to resource table\n");
			return -ENOMEM;
		}

		if (rproc_check_carveout_da(rproc, carveout, rsc->da, rsc->len))
			return -ENOMEM;

		/* Update memory carveout with resource table info */
		carveout->rsc_offset = offset;
		carveout->flags = rsc->flags;

		return 0;
	}

	/* Register carveout in list */
	carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da,
					rproc_alloc_carveout,
					rproc_release_carveout, rsc->name);
	if (!carveout) {
		dev_err(dev, "Can't allocate memory entry structure\n");
		return -ENOMEM;
	}

	carveout->flags = rsc->flags;
	carveout->rsc_offset = offset;
	rproc_add_carveout(rproc, carveout);

	return 0;
}

/**
 * rproc_add_carveout() - register an allocated carveout region
 * @rproc: rproc handle
 * @mem: memory entry to register
 *
 * This function registers the specified memory entry in the @rproc
 * carveouts list. The carveout should have been allocated before being
 * registered.
 */
void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem)
{
	list_add_tail(&mem->node, &rproc->carveouts);
}
EXPORT_SYMBOL(rproc_add_carveout);

/**
 * rproc_mem_entry_init() - allocate and initialize rproc_mem_entry struct
 * @dev: pointer to the device struct
 * @va: virtual address
 * @dma: dma address
 * @len: memory carveout length
 * @da: device address
 * @alloc: memory carveout allocation function
 * @release: memory carveout release function
 * @name: carveout name
 *
 * This function allocates a rproc_mem_entry struct and fills it with the
 * parameters provided by the client.
 */
struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
		     void *va, dma_addr_t dma, size_t len, u32 da,
		     int (*alloc)(struct rproc *, struct rproc_mem_entry *),
		     int (*release)(struct rproc *, struct rproc_mem_entry *),
		     const char *name, ...)
{
	struct rproc_mem_entry *mem;
	va_list args;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return mem;

	mem->va = va;
	mem->dma = dma;
	mem->da = da;
	mem->len = len;
	mem->alloc = alloc;
	mem->release = release;
	mem->rsc_offset = FW_RSC_ADDR_ANY;
	mem->of_resm_idx = -1;

	va_start(args, name);
	vsnprintf(mem->name, sizeof(mem->name), name, args);
	va_end(args);

	return mem;
}
EXPORT_SYMBOL(rproc_mem_entry_init);

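
/*
 * Example (illustrative sketch): a platform driver pre-registering a
 * statically mapped carveout before firmware load, so that the resource
 * table handlers above find and reuse it. The va/dma values, size, da
 * and name below are hypothetical:
 *
 *	struct rproc_mem_entry *mem;
 *
 *	mem = rproc_mem_entry_init(dev, va, dma, SZ_1M, 0x10000000,
 *				   NULL, NULL, "vdev0buffer");
 *	if (!mem)
 *		return -ENOMEM;
 *	rproc_add_carveout(rproc, mem);
 */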
/**
 * rproc_of_resm_mem_entry_init() - allocate and initialize rproc_mem_entry struct
 * from a reserved memory phandle
 * @dev: pointer to the device struct
 * @of_resm_idx: reserved memory phandle index in "memory-region"
 * @len: memory carveout length
 * @da: device address
 * @name: carveout name
 *
 * This function allocates a rproc_mem_entry struct and fills it with the
 * parameters provided by the client.
 */
struct rproc_mem_entry *
rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
			     u32 da, const char *name, ...)
{
	struct rproc_mem_entry *mem;
	va_list args;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return mem;

	mem->da = da;
	mem->len = len;
	mem->rsc_offset = FW_RSC_ADDR_ANY;
	mem->of_resm_idx = of_resm_idx;

	va_start(args, name);
	vsnprintf(mem->name, sizeof(mem->name), name, args);
	va_end(args);

	return mem;
}
EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);

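
/*
 * Example (illustrative sketch): registering a carveout backed by the
 * first "memory-region" phandle of the platform device. The index, size
 * and name are made up:
 *
 *	struct rproc_mem_entry *mem;
 *
 *	mem = rproc_of_resm_mem_entry_init(dev, 0, SZ_512K,
 *					   FW_RSC_ADDR_ANY, "rsc-table");
 *	if (!mem)
 *		return -ENOMEM;
 *	rproc_add_carveout(rproc, mem);
 */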
/*
 * A lookup table for resource handlers. The indices are defined in
 * enum fw_resource_type.
 */
static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
	[RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
	[RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
	[RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
	[RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
};

/* handle firmware resource entries before booting the remote processor */
static int rproc_handle_resources(struct rproc *rproc,
				  rproc_handle_resource_t handlers[RSC_LAST])
{
	struct device *dev = &rproc->dev;
	rproc_handle_resource_t handler;
	int ret = 0, i;

	if (!rproc->table_ptr)
		return 0;

	for (i = 0; i < rproc->table_ptr->num; i++) {
		int offset = rproc->table_ptr->offset[i];
		struct fw_rsc_hdr *hdr = (void *)rproc->table_ptr + offset;
		int avail = rproc->table_sz - offset - sizeof(*hdr);
		void *rsc = (void *)hdr + sizeof(*hdr);

		/* make sure table isn't truncated */
		if (avail < 0) {
			dev_err(dev, "rsc table is truncated\n");
			return -EINVAL;
		}

		dev_dbg(dev, "rsc: type %d\n", hdr->type);

		if (hdr->type >= RSC_VENDOR_START &&
		    hdr->type <= RSC_VENDOR_END) {
			ret = rproc_handle_rsc(rproc, hdr->type, rsc,
					       offset + sizeof(*hdr), avail);
			if (ret == RSC_HANDLED)
				continue;
			else if (ret < 0)
				break;

			dev_warn(dev, "unsupported vendor resource %d\n",
				 hdr->type);
			continue;
		}

		if (hdr->type >= RSC_LAST) {
			dev_warn(dev, "unsupported resource %d\n", hdr->type);
			continue;
		}

		handler = handlers[hdr->type];
		if (!handler)
			continue;

		ret = handler(rproc, rsc, offset + sizeof(*hdr), avail);
		if (ret)
			break;
	}

	return ret;
}

static int rproc_prepare_subdevices(struct rproc *rproc)
{
	struct rproc_subdev *subdev;
	int ret;

	list_for_each_entry(subdev, &rproc->subdevs, node) {
		if (subdev->prepare) {
			ret = subdev->prepare(subdev);
			if (ret)
				goto unroll_preparation;
		}
	}

	return 0;

unroll_preparation:
	list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->unprepare)
			subdev->unprepare(subdev);
	}

	return ret;
}

static int rproc_start_subdevices(struct rproc *rproc)
{
	struct rproc_subdev *subdev;
	int ret;

	list_for_each_entry(subdev, &rproc->subdevs, node) {
		if (subdev->start) {
			ret = subdev->start(subdev);
			if (ret)
				goto unroll_registration;
		}
	}

	return 0;

unroll_registration:
	list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->stop)
			subdev->stop(subdev, true);
	}

	return ret;
}

static void rproc_stop_subdevices(struct rproc *rproc, bool crashed)
{
	struct rproc_subdev *subdev;

	list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->stop)
			subdev->stop(subdev, crashed);
	}
}

static void rproc_unprepare_subdevices(struct rproc *rproc)
{
	struct rproc_subdev *subdev;

	list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->unprepare)
			subdev->unprepare(subdev);
	}
}

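
/*
 * Example (illustrative sketch): a driver-specific subdevice hooking
 * into the lifecycle callbacks walked above. rproc_add_subdev() is the
 * real entry point; "my_subdev" and its callbacks are hypothetical:
 *
 *	static int my_subdev_start(struct rproc_subdev *subdev)
 *	{
 *		return 0;	// bring up driver-specific resources
 *	}
 *
 *	static void my_subdev_stop(struct rproc_subdev *subdev, bool crashed)
 *	{
 *		// tear down; @crashed tells us why we're stopping
 *	}
 *
 *	my_subdev.start = my_subdev_start;
 *	my_subdev.stop = my_subdev_stop;
 *	rproc_add_subdev(rproc, &my_subdev);
 */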
/**
 * rproc_alloc_registered_carveouts() - allocate all carveouts registered
 * in the list
 * @rproc: the remote processor handle
 *
 * This function parses the registered carveout list, performs allocation
 * if an alloc() ops is registered, and updates the resource table
 * information if rsc_offset is set.
 *
 * Return: 0 on success
 */
static int rproc_alloc_registered_carveouts(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct fw_rsc_carveout *rsc;
	struct device *dev = &rproc->dev;
	u64 pa;
	int ret;

	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		if (entry->alloc) {
			ret = entry->alloc(rproc, entry);
			if (ret) {
				dev_err(dev, "Unable to allocate carveout %s: %d\n",
					entry->name, ret);
				return -ENOMEM;
			}
		}

		if (entry->rsc_offset != FW_RSC_ADDR_ANY) {
			/* update resource table */
			rsc = (void *)rproc->table_ptr + entry->rsc_offset;

			/*
			 * Some remote processors might need to know the pa
			 * even though they are behind an IOMMU. E.g., OMAP4's
			 * remote M3 processor needs this so it can control
			 * on-chip hardware accelerators that are not behind
			 * the IOMMU, and therefore must know the pa.
			 *
			 * Generally we don't want to expose physical addresses
			 * if we don't have to (remote processors are generally
			 * _not_ trusted), so we might want to do this only for
			 * remote processor that _must_ have this (e.g. OMAP4's
			 * dual M3 subsystem).
			 *
			 * Non-IOMMU processors might also want to have this info.
			 * In this case, the device address and the physical address
			 * are the same.
			 */

			/* Use va if defined, else dma, to generate pa */
			if (entry->va)
				pa = (u64)rproc_va_to_pa(entry->va);
			else
				pa = (u64)entry->dma;

			if (((u64)pa) & HIGH_BITS_MASK)
				dev_warn(dev,
					 "Physical address cast in 32bit to fit resource table format\n");

			rsc->pa = (u32)pa;
			rsc->da = entry->da;
			rsc->len = entry->len;
		}
	}

	return 0;
}

/**
 * rproc_coredump_cleanup() - clean up dump_segments list
 * @rproc: the remote processor handle
 */
static void rproc_coredump_cleanup(struct rproc *rproc)
{
	struct rproc_dump_segment *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
		list_del(&entry->node);
		kfree(entry);
	}
}

/**
 * rproc_resource_cleanup() - clean up and free all acquired resources
 * @rproc: rproc handle
 *
 * This function will free all resources acquired for @rproc, and it
 * is called whenever @rproc either shuts down or fails to boot.
 */
static void rproc_resource_cleanup(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct rproc_debug_trace *trace, *ttmp;
	struct rproc_vdev *rvdev, *rvtmp;
	struct device *dev = &rproc->dev;

	/* clean up debugfs trace entries */
	list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) {
		rproc_remove_trace_file(trace->tfile);
		rproc->num_traces--;
		list_del(&trace->node);
		kfree(trace);
	}

	/* clean up iommu mapping entries */
	list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
		size_t unmapped;

		unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
		if (unmapped != entry->len) {
			/* nothing much to do besides complaining */
			dev_err(dev, "failed to unmap %zx/%zu\n", entry->len,
				unmapped);
		}

		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up carveout allocations */
	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		if (entry->release)
			entry->release(rproc, entry);
		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up remote vdev entries */
	list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
		kref_put(&rvdev->refcount, rproc_vdev_release);

	rproc_coredump_cleanup(rproc);
}

static int rproc_start(struct rproc *rproc, const struct firmware *fw)
{
	struct resource_table *loaded_table;
	struct device *dev = &rproc->dev;
	int ret;

	/* load the ELF segments to memory */
	ret = rproc_load_segments(rproc, fw);
	if (ret) {
		dev_err(dev, "Failed to load program segments: %d\n", ret);
		return ret;
	}

	/*
	 * The starting device has been given the rproc->cached_table as the
	 * resource table. The address of the vring along with the other
	 * allocated resources (carveouts etc) is stored in cached_table.
	 * In order to pass this information to the remote device we must copy
	 * this information to device memory. We also update the table_ptr so
	 * that any subsequent changes will be applied to the loaded version.
	 */
	loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
	if (loaded_table) {
		memcpy(loaded_table, rproc->cached_table, rproc->table_sz);
		rproc->table_ptr = loaded_table;
	}

	ret = rproc_prepare_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to prepare subdevices for %s: %d\n",
			rproc->name, ret);
		goto reset_table_ptr;
	}

	/* power up the remote processor */
	ret = rproc->ops->start(rproc);
	if (ret) {
		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
		goto unprepare_subdevices;
	}

	/* Start any subdevices for the remote processor */
	ret = rproc_start_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to probe subdevices for %s: %d\n",
			rproc->name, ret);
		goto stop_rproc;
	}

	rproc->state = RPROC_RUNNING;

	dev_info(dev, "remote processor %s is now up\n", rproc->name);

	return 0;

stop_rproc:
	rproc->ops->stop(rproc);
unprepare_subdevices:
	rproc_unprepare_subdevices(rproc);
reset_table_ptr:
	rproc->table_ptr = rproc->cached_table;

	return ret;
}

/*
 * take a firmware and boot a remote processor with it.
 */
static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = &rproc->dev;
	const char *name = rproc->firmware;
	int ret;

	ret = rproc_fw_sanity_check(rproc, fw);
	if (ret)
		return ret;

	dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);

	/*
	 * if enabling an IOMMU isn't relevant for this rproc, this is
	 * just a nop
	 */
	ret = rproc_enable_iommu(rproc);
	if (ret) {
		dev_err(dev, "can't enable iommu: %d\n", ret);
		return ret;
	}

	rproc->bootaddr = rproc_get_boot_addr(rproc, fw);

	/* Load resource table, core dump segment list etc from the firmware */
	ret = rproc_parse_fw(rproc, fw);
	if (ret)
		goto disable_iommu;

	/* reset max_notifyid */
	rproc->max_notifyid = -1;

	/* reset handled vdev */
	rproc->nb_vdev = 0;

	/* handle fw resources which are required to boot rproc */
	ret = rproc_handle_resources(rproc, rproc_loading_handlers);
	if (ret) {
		dev_err(dev, "Failed to process resources: %d\n", ret);
		goto clean_up_resources;
	}

	/* Allocate carveout resources associated to rproc */
	ret = rproc_alloc_registered_carveouts(rproc);
	if (ret) {
		dev_err(dev, "Failed to allocate associated carveouts: %d\n",
			ret);
		goto clean_up_resources;
	}

	ret = rproc_start(rproc, fw);
	if (ret)
		goto clean_up_resources;

	return 0;

clean_up_resources:
	rproc_resource_cleanup(rproc);
	kfree(rproc->cached_table);
	rproc->cached_table = NULL;
	rproc->table_ptr = NULL;
disable_iommu:
	rproc_disable_iommu(rproc);
	return ret;
}

/*
 * take a firmware and boot it up.
 *
 * Note: this function is called asynchronously upon registration of the
 * remote processor (so we must wait until it completes before we try
 * to unregister the device). One other option is just to use a kref here;
 * that might be cleaner.
 */
static void rproc_auto_boot_callback(const struct firmware *fw, void *context)
{
	struct rproc *rproc = context;

	rproc_boot(rproc);

	release_firmware(fw);
}

static int rproc_trigger_auto_boot(struct rproc *rproc)
{
	int ret;

	/*
	 * We're initiating an asynchronous firmware loading, so we can
	 * be built-in kernel code, without hanging the boot process.
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      rproc->firmware, &rproc->dev, GFP_KERNEL,
				      rproc, rproc_auto_boot_callback);
	if (ret < 0)
		dev_err(&rproc->dev, "request_firmware_nowait err: %d\n", ret);

	return ret;
}

static int rproc_stop(struct rproc *rproc, bool crashed)
{
	struct device *dev = &rproc->dev;
	int ret;

	/* Stop any subdevices for the remote processor */
	rproc_stop_subdevices(rproc, crashed);

	/* the installed resource table is no longer accessible */
	rproc->table_ptr = rproc->cached_table;

	/* power off the remote processor */
	ret = rproc->ops->stop(rproc);
	if (ret) {
		dev_err(dev, "can't stop rproc: %d\n", ret);
		return ret;
	}

	rproc_unprepare_subdevices(rproc);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

	return 0;
}

/**
 * rproc_coredump_add_segment() - add segment of device memory to coredump
 * @rproc:	handle of a remote processor
 * @da:		device address
 * @size:	size of segment
 *
 * Add device memory to the list of segments to be included in a coredump for
 * the remoteproc.
 *
 * Return: 0 on success, negative errno on error.
 */
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
{
	struct rproc_dump_segment *segment;

	segment = kzalloc(sizeof(*segment), GFP_KERNEL);
	if (!segment)
		return -ENOMEM;

	segment->da = da;
	segment->size = size;

	list_add_tail(&segment->node, &rproc->dump_segments);

	return 0;
}
EXPORT_SYMBOL(rproc_coredump_add_segment);

/**
 * rproc_coredump_add_custom_segment() - add custom coredump segment
 * @rproc:	handle of a remote processor
 * @da:		device address
 * @size:	size of segment
 * @dumpfn:	custom dump function called for each segment during coredump
 * @priv:	private data
 *
 * Add device memory to the list of segments to be included in the coredump
 * and associate the segment with the given custom dump function and private
 * data.
 *
 * Return: 0 on success, negative errno on error.
 */
int rproc_coredump_add_custom_segment(struct rproc *rproc,
				      dma_addr_t da, size_t size,
				      void (*dumpfn)(struct rproc *rproc,
						     struct rproc_dump_segment *segment,
						     void *dest),
				      void *priv)
{
	struct rproc_dump_segment *segment;

	segment = kzalloc(sizeof(*segment), GFP_KERNEL);
	if (!segment)
		return -ENOMEM;

	segment->da = da;
	segment->size = size;
	segment->priv = priv;
	segment->dump = dumpfn;

	list_add_tail(&segment->node, &rproc->dump_segments);

	return 0;
}
EXPORT_SYMBOL(rproc_coredump_add_custom_segment);

/**
 * rproc_coredump() - perform coredump
 * @rproc:	rproc handle
 *
 * This function will generate an ELF header for the registered segments
 * and create a devcoredump device associated with rproc.
 */
static void rproc_coredump(struct rproc *rproc)
{
	struct rproc_dump_segment *segment;
	void *phdr;
	void *ehdr;
	size_t data_size;
	size_t offset;
	void *data;
	void *ptr;
	u8 class = rproc->elf_class;
	int phnum = 0;

	if (list_empty(&rproc->dump_segments))
		return;

	data_size = elf_size_of_hdr(class);
	list_for_each_entry(segment, &rproc->dump_segments, node) {
		data_size += elf_size_of_phdr(class) + segment->size;

		phnum++;
	}

	data = vmalloc(data_size);
	if (!data)
		return;

	ehdr = data;

	memset(ehdr, 0, elf_size_of_hdr(class));
	/* e_ident field is common for both elf32 and elf64 */
	elf_hdr_init_ident(ehdr, class);

	elf_hdr_set_e_type(class, ehdr, ET_CORE);
	elf_hdr_set_e_machine(class, ehdr, EM_NONE);
	elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
	elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
	elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
	elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
	elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
	elf_hdr_set_e_phnum(class, ehdr, phnum);

	phdr = data + elf_hdr_get_e_phoff(class, ehdr);
	offset = elf_hdr_get_e_phoff(class, ehdr);
	offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);

	list_for_each_entry(segment, &rproc->dump_segments, node) {
		memset(phdr, 0, elf_size_of_phdr(class));
		elf_phdr_set_p_type(class, phdr, PT_LOAD);
		elf_phdr_set_p_offset(class, phdr, offset);
		elf_phdr_set_p_vaddr(class, phdr, segment->da);
		elf_phdr_set_p_paddr(class, phdr, segment->da);
		elf_phdr_set_p_filesz(class, phdr, segment->size);
		elf_phdr_set_p_memsz(class, phdr, segment->size);
		elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
		elf_phdr_set_p_align(class, phdr, 0);

		if (segment->dump) {
			segment->dump(rproc, segment, data + offset);
		} else {
			ptr = rproc_da_to_va(rproc, segment->da, segment->size);
			if (!ptr) {
				dev_err(&rproc->dev,
					"invalid coredump segment (%pad, %zu)\n",
					&segment->da, segment->size);
				memset(data + offset, 0xff, segment->size);
			} else {
				memcpy(data + offset, ptr, segment->size);
			}
		}

		offset += elf_phdr_get_p_filesz(class, phdr);
		phdr += elf_size_of_phdr(class);
	}

	dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
}

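
/*
 * Example (illustrative sketch): a driver's parse_fw hook could register
 * each loadable ELF segment for later coredumps. Assuming, hypothetically,
 * that p_paddr carries the device address and phdr points at a program
 * header being walked:
 *
 *	ret = rproc_coredump_add_segment(rproc, phdr->p_paddr,
 *					 phdr->p_memsz);
 *	if (ret)
 *		return ret;
 */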
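
/*
 * Example (illustrative sketch): registering a segment whose contents
 * must be fetched through a driver-specific path rather than a direct
 * da-to-va translation. "my_dump" and "my_priv" are hypothetical:
 *
 *	static void my_dump(struct rproc *rproc,
 *			    struct rproc_dump_segment *segment, void *dest)
 *	{
 *		memset(dest, 0, segment->size);	// e.g. fill via mailbox reads
 *	}
 *
 *	rproc_coredump_add_custom_segment(rproc, da, size, my_dump, my_priv);
 */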
/**
 * rproc_trigger_recovery() - recover a remoteproc
 * @rproc: the remote processor
 *
 * The recovery is done by resetting all the virtio devices; that way all
 * the rpmsg drivers will be reset along with the remote processor, making
 * the remoteproc functional again.
 *
 * This function can sleep, so it cannot be called from atomic context.
 */
int rproc_trigger_recovery(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev = &rproc->dev;
	int ret;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret)
		return ret;

	/* State could have changed before we got the mutex */
	if (rproc->state != RPROC_CRASHED)
		goto unlock_mutex;

	dev_err(dev, "recovering %s\n", rproc->name);

	ret = rproc_stop(rproc, true);
	if (ret)
		goto unlock_mutex;

	/* generate coredump */
	rproc_coredump(rproc);

	/* load firmware */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto unlock_mutex;
	}

	/* boot the remote processor up again */
	ret = rproc_start(rproc, firmware_p);

	release_firmware(firmware_p);

unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}

/**
 * rproc_crash_handler_work() - handle a crash
 * @work: work treating the crash
 *
 * This function needs to handle everything related to a crash, like cpu
 * registers and stack dump, information to help to debug the fatal error, etc.
 */
static void rproc_crash_handler_work(struct work_struct *work)
{
	struct rproc *rproc = container_of(work, struct rproc, crash_handler);
	struct device *dev = &rproc->dev;

	dev_dbg(dev, "enter %s\n", __func__);

	mutex_lock(&rproc->lock);

	if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) {
		/* handle only the first crash detected */
		mutex_unlock(&rproc->lock);
		return;
	}

	rproc->state = RPROC_CRASHED;
	dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt,
		rproc->name);

	mutex_unlock(&rproc->lock);

	if (!rproc->recovery_disabled)
		rproc_trigger_recovery(rproc);
}

/**
 * rproc_boot() - boot a remote processor
 * @rproc: handle of a remote processor
 *
 * Boot a remote processor (i.e. load its firmware, power it on, ...).
 *
 * If the remote processor is already powered on, this function immediately
 * returns (successfully).
 *
 * Returns 0 on success, and an appropriate error value otherwise.
 */
int rproc_boot(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = &rproc->dev;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	if (rproc->state == RPROC_DELETED) {
		ret = -ENODEV;
		dev_err(dev, "can't boot deleted rproc %s\n", rproc->name);
		goto unlock_mutex;
	}

	/* skip the boot process if rproc is already powered up */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

	dev_info(dev, "powering up %s\n", rproc->name);

	/* load firmware */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto downref_rproc;
	}

	ret = rproc_fw_boot(rproc, firmware_p);

	release_firmware(firmware_p);

downref_rproc:
	if (ret)
		atomic_dec(&rproc->power);
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_boot);

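
/*
 * Example (illustrative sketch): a platform driver's watchdog ISR kicking
 * off the handling above. rproc_report_crash() is the real entry point;
 * the IRQ wiring and "my_wdg_isr" are hypothetical:
 *
 *	static irqreturn_t my_wdg_isr(int irq, void *data)
 *	{
 *		struct rproc *rproc = data;
 *
 *		rproc_report_crash(rproc, RPROC_WATCHDOG);
 *		return IRQ_HANDLED;
 *	}
 */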
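
/*
 * Example (illustrative sketch): booting a remote processor from a client
 * driver and checking the result:
 *
 *	ret = rproc_boot(rproc);
 *	if (ret) {
 *		dev_err(dev, "failed to boot %s: %d\n", rproc->name, ret);
 *		return ret;
 *	}
 */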
/**
 * rproc_shutdown() - power off the remote processor
 * @rproc: the remote processor
 *
 * Power off a remote processor (previously booted with rproc_boot()).
 *
 * In case @rproc is still being used by additional users, this function
 * will just decrement the power refcount and exit, without really
 * powering off the device.
 *
 * Every call to rproc_boot() must (eventually) be accompanied by a call
 * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
 *
 * Notes:
 * - we're not decrementing the rproc's refcount, only the power refcount,
 *   which means that the @rproc handle stays valid even after
 *   rproc_shutdown() returns, and users can still use it with a subsequent
 *   rproc_boot(), if needed.
 */
void rproc_shutdown(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power))
		goto out;

	ret = rproc_stop(rproc, false);
	if (ret) {
		atomic_inc(&rproc->power);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	rproc_disable_iommu(rproc);

	/* Free the copy of the resource table */
	kfree(rproc->cached_table);
	rproc->cached_table = NULL;
	rproc->table_ptr = NULL;
out:
	mutex_unlock(&rproc->lock);
}
EXPORT_SYMBOL(rproc_shutdown);
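
/*
 * Illustrative usage sketch: the teardown path matching the rproc_boot()
 * example above; the foo_* names are again hypothetical. This only drops
 * the caller's power reference, and the processor is powered off once the
 * last user is gone.
 *
 *	static void foo_stop_dsp(struct foo_priv *priv)
 *	{
 *		rproc_shutdown(priv->rproc);
 *	}
 */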

/**
 * rproc_get_by_phandle() - find a remote processor by phandle
 * @phandle: phandle to the rproc
 *
 * Finds an rproc handle using the remote processor's phandle, and then
 * returns a handle to the rproc.
 *
 * This function increments the remote processor's refcount, so always
 * use rproc_put() to decrement it back once rproc isn't needed anymore.
 *
 * Returns the rproc handle on success, and NULL on failure.
 */
#ifdef CONFIG_OF
struct rproc *rproc_get_by_phandle(phandle phandle)
{
	struct rproc *rproc = NULL, *r;
	struct device_node *np;

	np = of_find_node_by_phandle(phandle);
	if (!np)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(r, &rproc_list, node) {
		if (r->dev.parent && r->dev.parent->of_node == np) {
			/* prevent underlying implementation from being removed */
			if (!try_module_get(r->dev.parent->driver->owner)) {
				dev_err(&r->dev, "can't get owner\n");
				break;
			}

			rproc = r;
			get_device(&rproc->dev);
			break;
		}
	}
	rcu_read_unlock();

	of_node_put(np);

	return rproc;
}
#else
struct rproc *rproc_get_by_phandle(phandle phandle)
{
	return NULL;
}
#endif
EXPORT_SYMBOL(rproc_get_by_phandle);
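
/*
 * Illustrative usage sketch: resolving an rproc from a devicetree phandle
 * in a hypothetical client driver; the "foo,rproc" property is an
 * assumption for the example. The reference taken here must later be
 * dropped with rproc_put().
 *
 *	static struct rproc *foo_lookup_rproc(struct device *dev)
 *	{
 *		phandle ph;
 *
 *		if (of_property_read_u32(dev->of_node, "foo,rproc", &ph))
 *			return NULL;
 *
 *		return rproc_get_by_phandle(ph);
 *	}
 */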

/**
 * rproc_add() - register a remote processor
 * @rproc: the remote processor handle to register
 *
 * Registers @rproc with the remoteproc framework, after it has been
 * allocated with rproc_alloc().
 *
 * This is called by the platform-specific rproc implementation, whenever
 * a new remote processor device is probed.
 *
 * Returns 0 on success and an appropriate error code otherwise.
 *
 * Note: this function initiates an asynchronous firmware loading
 * context, which will look for virtio devices supported by the rproc's
 * firmware.
 *
 * If found, those virtio devices will be created and added, so as a result
 * of registering this remote processor, additional virtio drivers might be
 * probed.
 */
int rproc_add(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = device_add(dev);
	if (ret < 0)
		return ret;

	dev_info(dev, "%s is available\n", rproc->name);

	/* create debugfs entries */
	rproc_create_debug_dir(rproc);

	/* if rproc is marked always-on, request it to boot */
	if (rproc->auto_boot) {
		ret = rproc_trigger_auto_boot(rproc);
		if (ret < 0) {
			/* undo the debugfs entries and device_add() */
			rproc_delete_debug_dir(rproc);
			device_del(dev);
			return ret;
		}
	}

	/* expose to rproc_get_by_phandle users */
	mutex_lock(&rproc_list_mutex);
	list_add_rcu(&rproc->node, &rproc_list);
	mutex_unlock(&rproc_list_mutex);

	return 0;
}
EXPORT_SYMBOL(rproc_add);
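
/*
 * Illustrative usage sketch: the usual probe-time sequence in a
 * hypothetical platform driver, allocating an rproc and then registering
 * it. The foo_* names, ops table and firmware name are assumptions.
 *
 *	static int foo_rproc_probe(struct platform_device *pdev)
 *	{
 *		struct rproc *rproc;
 *		int ret;
 *
 *		rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev),
 *				    &foo_rproc_ops, "foo-fw.elf",
 *				    sizeof(struct foo_rproc));
 *		if (!rproc)
 *			return -ENOMEM;
 *
 *		ret = rproc_add(rproc);
 *		if (ret) {
 *			rproc_free(rproc);
 *			return ret;
 *		}
 *
 *		platform_set_drvdata(pdev, rproc);
 *		return 0;
 *	}
 */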

/**
 * rproc_type_release() - release a remote processor instance
 * @dev: the rproc's device
 *
 * This function should _never_ be called directly.
 *
 * It will be called by the driver core when no one holds a valid pointer
 * to @dev anymore.
 */
static void rproc_type_release(struct device *dev)
{
	struct rproc *rproc = container_of(dev, struct rproc, dev);

	dev_info(&rproc->dev, "releasing %s\n", rproc->name);

	idr_destroy(&rproc->notifyids);

	if (rproc->index >= 0)
		ida_simple_remove(&rproc_dev_index, rproc->index);

	kfree(rproc->firmware);
	kfree(rproc->ops);
	kfree(rproc);
}

static const struct device_type rproc_type = {
	.name		= "remoteproc",
	.release	= rproc_type_release,
};

/**
 * rproc_alloc() - allocate a remote processor handle
 * @dev: the underlying device
 * @name: name of this remote processor
 * @ops: platform-specific handlers (mainly start/stop)
 * @firmware: name of firmware file to load, can be NULL
 * @len: length of private data needed by the rproc driver (in bytes)
 *
 * Allocates a new remote processor handle, but does not register
 * it yet. If @firmware is NULL, a default name is used.
 *
 * This function should be used by rproc implementations during initialization
 * of the remote processor.
 *
 * After creating an rproc handle using this function, and when ready,
 * implementations should then call rproc_add() to complete
 * the registration of the remote processor.
 *
 * On success the new rproc is returned, and on failure, NULL.
 *
 * Note: _never_ directly deallocate @rproc, even if it was not registered
 * yet. Instead, when you need to unroll rproc_alloc(), use rproc_free().
 */
struct rproc *rproc_alloc(struct device *dev, const char *name,
			  const struct rproc_ops *ops,
			  const char *firmware, int len)
{
	struct rproc *rproc;
	const char *template = "rproc-%s-fw";
	char *p;
	int name_len;

	if (!dev || !name || !ops)
		return NULL;

	if (!firmware) {
		/*
		 * If the caller didn't pass in a firmware name then
		 * construct a default name.
		 */
		name_len = strlen(name) + strlen(template) - 2 + 1;
		p = kmalloc(name_len, GFP_KERNEL);
		if (!p)
			return NULL;
		snprintf(p, name_len, template, name);
	} else {
		p = kstrdup(firmware, GFP_KERNEL);
		if (!p)
			return NULL;
	}

	rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
	if (!rproc) {
		kfree(p);
		return NULL;
	}

	rproc->ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
	if (!rproc->ops) {
		kfree(p);
		kfree(rproc);
		return NULL;
	}

	rproc->firmware = p;
	rproc->name = name;
	rproc->priv = &rproc[1];
	rproc->auto_boot = true;
	rproc->elf_class = ELFCLASS32;

	device_initialize(&rproc->dev);
	rproc->dev.parent = dev;
	rproc->dev.type = &rproc_type;
	rproc->dev.class = &rproc_class;
	rproc->dev.driver_data = rproc;

	/* Assign a unique device index and name */
	rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
	if (rproc->index < 0) {
		dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
		put_device(&rproc->dev);
		return NULL;
	}

	dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);

	atomic_set(&rproc->power, 0);

	/* Default to ELF loader if no load function is specified */
	if (!rproc->ops->load) {
		rproc->ops->load = rproc_elf_load_segments;
		rproc->ops->parse_fw = rproc_elf_load_rsc_table;
		rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table;
		if (!rproc->ops->sanity_check)
			rproc->ops->sanity_check = rproc_elf32_sanity_check;
		rproc->ops->get_boot_addr = rproc_elf_get_boot_addr;
	}

	mutex_init(&rproc->lock);

	idr_init(&rproc->notifyids);

	INIT_LIST_HEAD(&rproc->carveouts);
	INIT_LIST_HEAD(&rproc->mappings);
	INIT_LIST_HEAD(&rproc->traces);
	INIT_LIST_HEAD(&rproc->rvdevs);
	INIT_LIST_HEAD(&rproc->subdevs);
	INIT_LIST_HEAD(&rproc->dump_segments);

	INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work);

	rproc->state = RPROC_OFFLINE;

	return rproc;
}
EXPORT_SYMBOL(rproc_alloc);
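
/*
 * Illustrative usage sketch: the @len bytes requested from rproc_alloc()
 * are reachable through rproc->priv and typically hold the driver's
 * per-instance state. struct foo_rproc and its fields are hypothetical.
 *
 *	struct foo_rproc {
 *		void __iomem *base;
 *	};
 *
 *	static int foo_rproc_start(struct rproc *rproc)
 *	{
 *		struct foo_rproc *foo = rproc->priv;
 *
 *		writel(1, foo->base);
 *		return 0;
 *	}
 */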

/**
 * rproc_free() - unroll rproc_alloc()
 * @rproc: the remote processor handle
 *
 * This function decrements the rproc dev refcount.
 *
 * If no one holds any reference to the rproc anymore, its refcount will
 * drop to zero and it will be freed.
 */
void rproc_free(struct rproc *rproc)
{
	put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_free);

/**
 * rproc_put() - release rproc reference
 * @rproc: the remote processor handle
 *
 * This function decrements the rproc dev refcount.
 *
 * If no one holds any reference to the rproc anymore, its refcount will
 * drop to zero and it will be freed.
 */
void rproc_put(struct rproc *rproc)
{
	module_put(rproc->dev.parent->driver->owner);
	put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_put);

/**
 * rproc_del() - unregister a remote processor
 * @rproc: rproc handle to unregister
 *
 * This function should be called when the platform-specific rproc
 * implementation decides to remove the rproc device. It should
 * _only_ be called if a previous invocation of rproc_add()
 * has completed successfully.
 *
 * After rproc_del() returns, @rproc isn't freed yet, because
 * of the outstanding reference created by rproc_alloc(). To decrement that
 * one last refcount, one still needs to call rproc_free().
 *
 * Returns 0 on success and -EINVAL if @rproc isn't valid.
 */
int rproc_del(struct rproc *rproc)
{
	if (!rproc)
		return -EINVAL;

	/* if rproc is marked always-on, rproc_add() booted it */
	/* TODO: make sure this works with rproc->power > 1 */
	if (rproc->auto_boot)
		rproc_shutdown(rproc);

	mutex_lock(&rproc->lock);
	rproc->state = RPROC_DELETED;
	mutex_unlock(&rproc->lock);

	rproc_delete_debug_dir(rproc);

	/* the rproc is downref'ed as soon as it's removed from the list */
	mutex_lock(&rproc_list_mutex);
	list_del_rcu(&rproc->node);
	mutex_unlock(&rproc_list_mutex);

	/* Ensure that no readers of rproc_list are still active */
	synchronize_rcu();

	device_del(&rproc->dev);

	return 0;
}
EXPORT_SYMBOL(rproc_del);
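
/*
 * Illustrative usage sketch: the remove path matching the probe sequence
 * shown after rproc_add() above; foo_* names are hypothetical.
 *
 *	static int foo_rproc_remove(struct platform_device *pdev)
 *	{
 *		struct rproc *rproc = platform_get_drvdata(pdev);
 *
 *		rproc_del(rproc);
 *		rproc_free(rproc);
 *
 *		return 0;
 *	}
 */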

/**
 * rproc_add_subdev() - add a subdevice to a remoteproc
 * @rproc: rproc handle to add the subdevice to
 * @subdev: subdev handle to register
 *
 * Caller is responsible for populating optional subdevice function pointers.
 */
void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
{
	list_add_tail(&subdev->node, &rproc->subdevs);
}
EXPORT_SYMBOL(rproc_add_subdev);

/**
 * rproc_remove_subdev() - remove a subdevice from a remoteproc
 * @rproc: rproc handle to remove the subdevice from
 * @subdev: subdev handle, previously registered with rproc_add_subdev()
 */
void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
{
	list_del(&subdev->node);
}
EXPORT_SYMBOL(rproc_remove_subdev);
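
/*
 * Illustrative usage sketch: registering a subdevice whose callbacks
 * follow the remote processor's lifecycle. The embedding structure and
 * the foo_* callbacks are assumptions for the example.
 *
 *	struct foo_subdev {
 *		struct rproc_subdev subdev;
 *	};
 *
 *	static int foo_subdev_start(struct rproc_subdev *subdev)
 *	{
 *		return 0;
 *	}
 *
 *	static void foo_subdev_stop(struct rproc_subdev *subdev, bool crashed)
 *	{
 *	}
 *
 *	static void foo_register_subdev(struct rproc *rproc,
 *					struct foo_subdev *fsd)
 *	{
 *		fsd->subdev.start = foo_subdev_start;
 *		fsd->subdev.stop = foo_subdev_stop;
 *		rproc_add_subdev(rproc, &fsd->subdev);
 *	}
 */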

/**
 * rproc_get_by_child() - acquire rproc handle of @dev's ancestor
 * @dev: child device to find ancestor of
 *
 * Returns the ancestor rproc instance, or NULL if not found.
 */
struct rproc *rproc_get_by_child(struct device *dev)
{
	for (dev = dev->parent; dev; dev = dev->parent) {
		if (dev->type == &rproc_type)
			return dev->driver_data;
	}

	return NULL;
}
EXPORT_SYMBOL(rproc_get_by_child);
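
/*
 * Illustrative usage sketch: a driver bound to a device sitting below an
 * rproc in the device hierarchy can recover the owning rproc like this
 * (foo_* is hypothetical). Note that, as implemented above, this simply
 * walks the parent chain and takes no extra reference, so no rproc_put()
 * is needed afterwards.
 *
 *	static struct rproc *foo_parent_rproc(struct device *dev)
 *	{
 *		return rproc_get_by_child(dev);
 *	}
 */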

/**
 * rproc_report_crash() - rproc crash reporter function
 * @rproc: remote processor
 * @type: crash type
 *
 * This function must be called every time a crash is detected by the
 * low-level drivers implementing a specific remoteproc. It should not be
 * called from a non-remoteproc driver.
 *
 * This function can be called from atomic/interrupt context.
 */
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
{
	if (!rproc) {
		pr_err("NULL rproc pointer\n");
		return;
	}

	dev_err(&rproc->dev, "crash detected in %s: type %s\n",
		rproc->name, rproc_crash_to_string(type));

	/* create a new task to handle the error */
	schedule_work(&rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);
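
/*
 * Illustrative usage sketch: a hypothetical watchdog interrupt handler
 * in a platform driver reporting a crash. This is safe here because
 * rproc_report_crash() may be called from atomic context.
 *
 *	static irqreturn_t foo_wdt_isr(int irq, void *data)
 *	{
 *		struct rproc *rproc = data;
 *
 *		rproc_report_crash(rproc, RPROC_WATCHDOG);
 *
 *		return IRQ_HANDLED;
 *	}
 */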

static int rproc_panic_handler(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	unsigned int longest = 0;
	struct rproc *rproc;
	unsigned int d;

	rcu_read_lock();
	list_for_each_entry_rcu(rproc, &rproc_list, node) {
		if (!rproc->ops->panic || rproc->state != RPROC_RUNNING)
			continue;

		d = rproc->ops->panic(rproc);
		longest = max(longest, d);
	}
	rcu_read_unlock();

	/*
	 * Delay for the longest requested duration before returning. This can
	 * be used by the remoteproc drivers to give the remote processor time
	 * to perform any requested operations (such as flush caches), when
	 * it's not possible to signal the Linux side due to the panic.
	 */
	mdelay(longest);

	return NOTIFY_DONE;
}
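
/*
 * Illustrative sketch: a hypothetical ops->panic implementation invoked
 * by the notifier above, reusing struct foo_rproc from the rproc_alloc()
 * example. It kicks a made-up doorbell register and asks the core to
 * wait 200 ms so the remote side can flush its state; the register names
 * and the timeout are assumptions.
 *
 *	static unsigned long foo_rproc_panic(struct rproc *rproc)
 *	{
 *		struct foo_rproc *foo = rproc->priv;
 *
 *		writel(FOO_PANIC_REQ, foo->base + FOO_DOORBELL);
 *
 *		return 200;
 *	}
 */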

static void __init rproc_init_panic(void)
{
	rproc_panic_nb.notifier_call = rproc_panic_handler;
	atomic_notifier_chain_register(&panic_notifier_list, &rproc_panic_nb);
}

static void __exit rproc_exit_panic(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &rproc_panic_nb);
}

static int __init remoteproc_init(void)
{
	rproc_init_sysfs();
	rproc_init_debugfs();
	rproc_init_panic();

	return 0;
}
subsys_initcall(remoteproc_init);

static void __exit remoteproc_exit(void)
{
	ida_destroy(&rproc_dev_index);

	rproc_exit_panic();
	rproc_exit_debugfs();
	rproc_exit_sysfs();
}
module_exit(remoteproc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Remote Processor Framework");