// SPDX-License-Identifier: GPL-2.0-only
/*
 * Remote Processor Framework
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 * Mark Grosen <mgrosen@ti.com>
 * Fernando Guzman Lugo <fernando.lugo@ti.com>
 * Suman Anna <s-anna@ti.com>
 * Robert Tivy <rtivy@ti.com>
 * Armando Uribe De Leon <x0095078@ti.com>
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/devcoredump.h>
#include <linux/rculist.h>
#include <linux/remoteproc.h>
#include <linux/pm_runtime.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/elf.h>
#include <linux/crc32.h>
#include <linux/of_reserved_mem.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <asm/byteorder.h>
#include <linux/platform_device.h>

#include "remoteproc_internal.h"
#include "remoteproc_elf_helpers.h"

#define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL

static DEFINE_MUTEX(rproc_list_mutex);
static LIST_HEAD(rproc_list);
static struct notifier_block rproc_panic_nb;

typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
				       void *, int offset, int avail);

static int rproc_alloc_carveout(struct rproc *rproc,
				struct rproc_mem_entry *mem);
static int rproc_release_carveout(struct rproc *rproc,
				  struct rproc_mem_entry *mem);

/* Unique indices for remoteproc devices */
static DEFINE_IDA(rproc_dev_index);

static const char * const rproc_crash_names[] = {
	[RPROC_MMUFAULT]	= "mmufault",
	[RPROC_WATCHDOG]	= "watchdog",
	[RPROC_FATAL_ERROR]	= "fatal error",
};

/* translate rproc_crash_type to string */
static const char *rproc_crash_to_string(enum rproc_crash_type type)
{
	if (type < ARRAY_SIZE(rproc_crash_names))
		return rproc_crash_names[type];
	return "unknown";
}

/*
 * This is the IOMMU fault handler we register with the IOMMU API
 * (when relevant; not all remote processors access memory through
 * an IOMMU).
 *
 * IOMMU core will invoke this handler whenever the remote processor
 * tries to access an unmapped device address.
 */
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
			     unsigned long iova, int flags, void *token)
{
	struct rproc *rproc = token;

	dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);

	rproc_report_crash(rproc, RPROC_MMUFAULT);

	/*
	 * Let the iommu core know we're not really handling this fault;
	 * we just used it as a recovery trigger.
	 */
	return -ENOSYS;
}

static int rproc_enable_iommu(struct rproc *rproc)
{
	struct iommu_domain *domain;
	struct device *dev = rproc->dev.parent;
	int ret;

	if (!rproc->has_iommu) {
		dev_dbg(dev, "iommu not present\n");
		return 0;
	}

	domain = iommu_domain_alloc(dev->bus);
	if (!domain) {
		dev_err(dev, "can't alloc iommu domain\n");
		return -ENOMEM;
	}

	iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);

	ret = iommu_attach_device(domain, dev);
	if (ret) {
		dev_err(dev, "can't attach iommu device: %d\n", ret);
		goto free_domain;
	}

	rproc->domain = domain;

	return 0;

free_domain:
	iommu_domain_free(domain);
	return ret;
}

static void rproc_disable_iommu(struct rproc *rproc)
{
	struct iommu_domain *domain = rproc->domain;
	struct device *dev = rproc->dev.parent;

	if (!domain)
		return;

	iommu_detach_device(domain, dev);
	iommu_domain_free(domain);
}

phys_addr_t rproc_va_to_pa(void *cpu_addr)
{
	/*
	 * Return physical address according to virtual address location
	 * - in vmalloc: if region ioremapped or defined as dma_alloc_coherent
	 * - in kernel: if region allocated in generic dma memory pool
	 */
	if (is_vmalloc_addr(cpu_addr)) {
		return page_to_phys(vmalloc_to_page(cpu_addr)) +
				    offset_in_page(cpu_addr);
	}

	WARN_ON(!virt_addr_valid(cpu_addr));
	return virt_to_phys(cpu_addr);
}
EXPORT_SYMBOL(rproc_va_to_pa);

/**
 * rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
 * @rproc: handle of a remote processor
 * @da: remoteproc device address to translate
 * @len: length of the memory region @da is pointing to
 *
 * Some remote processors will ask us to allocate them physically contiguous
 * memory regions (which we call "carveouts"), and map them to specific
 * device addresses (which are hardcoded in the firmware). They may also have
 * dedicated memory regions internal to the processors, and use them either
 * exclusively or alongside carveouts.
 *
 * They may then ask us to copy objects into specific device addresses (e.g.
 * code/data sections) or expose certain symbols at other device addresses
 * (e.g. their trace buffer).
 *
 * This function is a helper with which we can go over the allocated
 * carveouts and translate specific device addresses to kernel virtual
 * addresses so we can access the referenced memory. It can also perform
 * translations on the internal remoteproc memory regions through a platform
 * implementation specific da_to_va ops, if present.
 *
 * The function returns a valid kernel address on success or NULL on failure.
 *
 * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
 * but only on kernel direct mapped RAM memory. Instead, we're just using
 * here the output of the DMA API for the carveouts, which should be more
 * correct.
 */
void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
	struct rproc_mem_entry *carveout;
	void *ptr = NULL;

	if (rproc->ops->da_to_va) {
		ptr = rproc->ops->da_to_va(rproc, da, len);
		if (ptr)
			goto out;
	}

	list_for_each_entry(carveout, &rproc->carveouts, node) {
		int offset = da - carveout->da;

		/* Verify that carveout is allocated */
		if (!carveout->va)
			continue;

		/* try next carveout if da is too small */
		if (offset < 0)
			continue;

		/* try next carveout if da is too large */
		if (offset + len > carveout->len)
			continue;

		ptr = carveout->va + offset;

		break;
	}

out:
	return ptr;
}
EXPORT_SYMBOL(rproc_da_to_va);

/**
 * rproc_find_carveout_by_name() - lookup the carveout region by a name
 * @rproc: handle of a remote processor
 * @name: carveout name to find (format string)
 * @...: optional parameters matching @name string
 *
 * A platform driver can register pre-allocated carveouts (physically
 * contiguous memory regions) before rproc firmware loading and the
 * associated resource table analysis. These regions may be dedicated memory
 * regions internal to the coprocessor or specified DDR regions with specific
 * attributes.
 *
 * This function is a helper with which we can go over the
 * allocated carveouts and return associated region characteristics like
 * coprocessor address, length or processor virtual address.
 *
 * Return: a valid pointer to a carveout entry on success or NULL on failure.
 */
struct rproc_mem_entry *
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
{
	va_list args;
	char _name[32];
	struct rproc_mem_entry *carveout, *mem = NULL;

	if (!name)
		return NULL;

	va_start(args, name);
	vsnprintf(_name, sizeof(_name), name, args);
	va_end(args);

	list_for_each_entry(carveout, &rproc->carveouts, node) {
		/* Compare carveout and requested names */
		if (!strcmp(carveout->name, _name)) {
			mem = carveout;
			break;
		}
	}

	return mem;
}
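/*
 * Illustrative usage of rproc_find_carveout_by_name() (a hedged sketch, not
 * taken from any in-tree driver): since @name is a format string, callers
 * typically look up the conventionally named vdev vring carveouts, e.g.:
 *
 *	struct rproc_mem_entry *mem;
 *
 *	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", 0, 1);
 *	if (mem)
 *		dev_dbg(dev, "vring da 0x%x len 0x%zx\n", mem->da, mem->len);
 *
 * The "vdev%dvring%d" naming matches what rproc_alloc_vring() below uses
 * when registering vring carveouts.
 */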
/**
 * rproc_check_carveout_da() - Check specified carveout da configuration
 * @rproc: handle of a remote processor
 * @mem: pointer to the carveout to check
 * @da: area device address
 * @len: associated area size
 *
 * This function is a helper to verify that the requested device area (the
 * da/len pair) is part of the specified carveout.
 * If da is not set (defined as FW_RSC_ADDR_ANY), only the requested length
 * is checked.
 *
 * Return: 0 if carveout matches request else error
 */
static int rproc_check_carveout_da(struct rproc *rproc,
				   struct rproc_mem_entry *mem, u32 da, u32 len)
{
	struct device *dev = &rproc->dev;
	int delta;

	/* Check requested resource length */
	if (len > mem->len) {
		dev_err(dev, "Registered carveout doesn't fit len request\n");
		return -EINVAL;
	}

	if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) {
		/* Address doesn't match registered carveout configuration */
		return -EINVAL;
	} else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) {
		delta = da - mem->da;

		/* Check requested resource belongs to registered carveout */
		if (delta < 0) {
			dev_err(dev,
				"Registered carveout doesn't fit da request\n");
			return -EINVAL;
		}

		if (delta + len > mem->len) {
			dev_err(dev,
				"Registered carveout doesn't fit len request\n");
			return -EINVAL;
		}
	}

	return 0;
}

int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct rproc_vring *rvring = &rvdev->vring[i];
	struct fw_rsc_vdev *rsc;
	int ret, notifyid;
	struct rproc_mem_entry *mem;
	size_t size;

	/* actual size of vring (in bytes) */
	size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));

	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;

	/* Search for pre-registered carveout */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  i);
	if (mem) {
		if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size))
			return -ENOMEM;
	} else {
		/* Register carveout in list */
		mem = rproc_mem_entry_init(dev, NULL, 0,
					   size, rsc->vring[i].da,
					   rproc_alloc_carveout,
					   rproc_release_carveout,
					   "vdev%dvring%d",
					   rvdev->index, i);
		if (!mem) {
			dev_err(dev, "Can't allocate memory entry structure\n");
			return -ENOMEM;
		}

		rproc_add_carveout(rproc, mem);
	}

	/*
	 * Assign an rproc-wide unique index for this vring
	 * TODO: assign a notifyid for rvdev updates as well
	 * TODO: support predefined notifyids (via resource table)
	 */
	ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		dev_err(dev, "idr_alloc failed: %d\n", ret);
		return ret;
	}
	notifyid = ret;

	/* Potentially bump max_notifyid */
	if (notifyid > rproc->max_notifyid)
		rproc->max_notifyid = notifyid;

	rvring->notifyid = notifyid;

	/* Let the rproc know the notifyid of this vring. */
	rsc->vring[i].notifyid = notifyid;
	return 0;
}

static int
rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
	struct rproc_vring *rvring = &rvdev->vring[i];

	dev_dbg(dev, "vdev rsc: vring%d: da 0x%x, qsz %d, align %d\n",
		i, vring->da, vring->num, vring->align);

	/* verify queue size and vring alignment are sane */
	if (!vring->num || !vring->align) {
		dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
			vring->num, vring->align);
		return -EINVAL;
	}

	rvring->len = vring->num;
	rvring->align = vring->align;
	rvring->rvdev = rvdev;

	return 0;
}

void rproc_free_vring(struct rproc_vring *rvring)
{
	struct rproc *rproc = rvring->rvdev->rproc;
	int idx = rvring - rvring->rvdev->vring;
	struct fw_rsc_vdev *rsc;

	idr_remove(&rproc->notifyids, rvring->notifyid);

	/* reset resource entry info */
	rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
	rsc->vring[idx].da = 0;
	rsc->vring[idx].notifyid = -1;
}

static int rproc_vdev_do_start(struct rproc_subdev *subdev)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);

	return rproc_add_virtio_dev(rvdev, rvdev->id);
}

static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
	int ret;

	ret = device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev);
	if (ret)
		dev_warn(&rvdev->dev, "can't remove vdev child device: %d\n", ret);
}

/**
 * rproc_rvdev_release() - release the existence of a rvdev
 *
 * @dev: the subdevice's dev
 */
static void rproc_rvdev_release(struct device *dev)
{
	struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);

	of_reserved_mem_device_release(dev);

	kfree(rvdev);
}
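/*
 * The start/stop pair above is the canonical subdev pattern: embed a
 * struct rproc_subdev, fill in the hooks you need, and register it with
 * rproc_add_subdev(). A hedged sketch (my_subdev/my_start/my_stop are
 * hypothetical names, not part of this file):
 *
 *	struct my_subdev {
 *		struct rproc_subdev subdev;
 *	};
 *
 *	static int my_start(struct rproc_subdev *subdev)
 *	{
 *		struct my_subdev *msd =
 *			container_of(subdev, struct my_subdev, subdev);
 *
 *		// bring the auxiliary function up after the rproc starts
 *		return 0;
 *	}
 *
 *	static void my_stop(struct rproc_subdev *subdev, bool crashed)
 *	{
 *		// tear down; @crashed tells us why we're stopping
 *	}
 *
 *	msd->subdev.start = my_start;
 *	msd->subdev.stop = my_stop;
 *	rproc_add_subdev(rproc, &msd->subdev);
 */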
/**
 * rproc_handle_vdev() - handle a vdev fw resource
 * @rproc: the remote processor
 * @rsc: the vring resource descriptor
 * @offset: offset of the resource entry
 * @avail: size of available data (for sanity checking the image)
 *
 * This resource entry requests the host to statically register a virtio
 * device (vdev), and setup everything needed to support it. It contains
 * everything needed to make it possible: the virtio device id, virtio
 * device features, vrings information, virtio config space, etc...
 *
 * Before registering the vdev, the vrings are allocated from non-cacheable
 * physically contiguous memory. Currently we only support two vrings per
 * remote processor (temporary limitation). We might also want to consider
 * doing the vring allocation only later when ->find_vqs() is invoked, and
 * then release them upon ->del_vqs().
 *
 * Note: @da is currently not really handled correctly: we dynamically
 * allocate it using the DMA API, ignoring requested hard coded addresses,
 * and we don't take care of any required IOMMU programming. This is all
 * going to be taken care of when the generic iommu-based DMA API will be
 * merged. Meanwhile, statically-addressed iommu-based firmware images should
 * use RSC_DEVMEM resource entries to map their required @da to the physical
 * address of their base CMA region (ouch, hacky!).
 *
 * Returns 0 on success, or an appropriate error code otherwise
 */
static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
			     int offset, int avail)
{
	struct device *dev = &rproc->dev;
	struct rproc_vdev *rvdev;
	int i, ret;
	char name[16];

	/* make sure resource isn't truncated */
	if (struct_size(rsc, vring, rsc->num_of_vrings) + rsc->config_len >
			avail) {
		dev_err(dev, "vdev rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved[0] || rsc->reserved[1]) {
		dev_err(dev, "vdev rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "vdev rsc: id %d, dfeatures 0x%x, cfg len %d, %d vrings\n",
		rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);

	/* we currently support only two vrings per rvdev */
	if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) {
		dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings);
		return -EINVAL;
	}

	rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	kref_init(&rvdev->refcount);

	rvdev->id = rsc->id;
	rvdev->rproc = rproc;
	rvdev->index = rproc->nb_vdev++;

	/* Initialise vdev subdevice */
	snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
	rvdev->dev.parent = &rproc->dev;
	rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset;
	rvdev->dev.release = rproc_rvdev_release;
	dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);
	dev_set_drvdata(&rvdev->dev, rvdev);

	ret = device_register(&rvdev->dev);
	if (ret) {
		put_device(&rvdev->dev);
		return ret;
	}
	/* Make device dma capable by inheriting from parent's capabilities */
	set_dma_ops(&rvdev->dev, get_dma_ops(rproc->dev.parent));

	ret = dma_coerce_mask_and_coherent(&rvdev->dev,
					   dma_get_mask(rproc->dev.parent));
	if (ret) {
		dev_warn(dev,
			 "Failed to set DMA mask %llx. Trying to continue... %x\n",
%x\n", 540 dma_get_mask(rproc->dev.parent), ret); 541 } 542 543 /* parse the vrings */ 544 for (i = 0; i < rsc->num_of_vrings; i++) { 545 ret = rproc_parse_vring(rvdev, rsc, i); 546 if (ret) 547 goto free_rvdev; 548 } 549 550 /* remember the resource offset*/ 551 rvdev->rsc_offset = offset; 552 553 /* allocate the vring resources */ 554 for (i = 0; i < rsc->num_of_vrings; i++) { 555 ret = rproc_alloc_vring(rvdev, i); 556 if (ret) 557 goto unwind_vring_allocations; 558 } 559 560 list_add_tail(&rvdev->node, &rproc->rvdevs); 561 562 rvdev->subdev.start = rproc_vdev_do_start; 563 rvdev->subdev.stop = rproc_vdev_do_stop; 564 565 rproc_add_subdev(rproc, &rvdev->subdev); 566 567 return 0; 568 569 unwind_vring_allocations: 570 for (i--; i >= 0; i--) 571 rproc_free_vring(&rvdev->vring[i]); 572 free_rvdev: 573 device_unregister(&rvdev->dev); 574 return ret; 575 } 576 577 void rproc_vdev_release(struct kref *ref) 578 { 579 struct rproc_vdev *rvdev = container_of(ref, struct rproc_vdev, refcount); 580 struct rproc_vring *rvring; 581 struct rproc *rproc = rvdev->rproc; 582 int id; 583 584 for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) { 585 rvring = &rvdev->vring[id]; 586 rproc_free_vring(rvring); 587 } 588 589 rproc_remove_subdev(rproc, &rvdev->subdev); 590 list_del(&rvdev->node); 591 device_unregister(&rvdev->dev); 592 } 593 594 /** 595 * rproc_handle_trace() - handle a shared trace buffer resource 596 * @rproc: the remote processor 597 * @rsc: the trace resource descriptor 598 * @offset: offset of the resource entry 599 * @avail: size of available data (for sanity checking the image) 600 * 601 * In case the remote processor dumps trace logs into memory, 602 * export it via debugfs. 603 * 604 * Currently, the 'da' member of @rsc should contain the device address 605 * where the remote processor is dumping the traces. Later we could also 606 * support dynamically allocating this address using the generic 607 * DMA API (but currently there isn't a use case for that). 
 *
 * Returns 0 on success, or an appropriate error code otherwise
 */
static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
			      int offset, int avail)
{
	struct rproc_debug_trace *trace;
	struct device *dev = &rproc->dev;
	char name[15];

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "trace rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "trace rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	trace = kzalloc(sizeof(*trace), GFP_KERNEL);
	if (!trace)
		return -ENOMEM;

	/* set the trace buffer dma properties */
	trace->trace_mem.len = rsc->len;
	trace->trace_mem.da = rsc->da;

	/* set pointer on rproc device */
	trace->rproc = rproc;

	/* make sure snprintf always null terminates, even if truncating */
	snprintf(name, sizeof(name), "trace%d", rproc->num_traces);

	/* create the debugfs entry */
	trace->tfile = rproc_create_trace_file(name, rproc, trace);
	if (!trace->tfile) {
		kfree(trace);
		return -EINVAL;
	}

	list_add_tail(&trace->node, &rproc->traces);

	rproc->num_traces++;

	dev_dbg(dev, "%s added: da 0x%x, len 0x%x\n",
		name, rsc->da, rsc->len);

	return 0;
}
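/*
 * A note on consuming the trace entries created above: each one shows up
 * as a file in the rproc debugfs directory, so (assuming debugfs is
 * mounted at the usual location and an instance named remoteproc0) the
 * logs can typically be read with something like:
 *
 *	# cat /sys/kernel/debug/remoteproc/remoteproc0/trace0
 *
 * The exact instance name and trace index depend on the platform.
 */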
/**
 * rproc_handle_devmem() - handle devmem resource entry
 * @rproc: remote processor handle
 * @rsc: the devmem resource entry
 * @offset: offset of the resource entry
 * @avail: size of available data (for sanity checking the image)
 *
 * Remote processors commonly need to access certain on-chip peripherals.
 *
 * Some of these remote processors access memory via an iommu device,
 * and might require us to configure their iommu before they can access
 * the on-chip peripherals they need.
 *
 * This resource entry is a request to map such a peripheral device.
 *
 * These devmem entries will contain the physical address of the device in
 * the 'pa' member. If a specific device address is expected, then 'da' will
 * contain it (currently this is the only use case supported). 'len' will
 * contain the size of the physical region we need to map.
 *
 * Currently we just "trust" those devmem entries to contain valid physical
 * addresses, but this is going to change: we want the implementations to
 * tell us ranges of physical addresses the firmware is allowed to request,
 * and not allow firmwares to request access to physical addresses that
 * are outside those ranges.
 */
static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
			       int offset, int avail)
{
	struct rproc_mem_entry *mapping;
	struct device *dev = &rproc->dev;
	int ret;

	/* no point in handling this resource without a valid iommu domain */
	if (!rproc->domain)
		return -EINVAL;

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "devmem rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "devmem rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
	if (ret) {
		dev_err(dev, "failed to map devmem: %d\n", ret);
		goto out;
	}

	/*
	 * We'll need this info later when we'll want to unmap everything
	 * (e.g. on shutdown).
	 *
	 * We can't trust the remote processor not to change the resource
	 * table, so we must maintain this info independently.
	 */
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	list_add_tail(&mapping->node, &rproc->mappings);

	dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
		rsc->pa, rsc->da, rsc->len);

	return 0;

out:
	kfree(mapping);
	return ret;
}

/**
 * rproc_alloc_carveout() - allocate specified carveout
 * @rproc: rproc handle
 * @mem: the memory entry to allocate
 *
 * This function allocates the specified memory entry @mem using
 * dma_alloc_coherent() as the default allocator.
 */
static int rproc_alloc_carveout(struct rproc *rproc,
				struct rproc_mem_entry *mem)
{
	struct rproc_mem_entry *mapping = NULL;
	struct device *dev = &rproc->dev;
	dma_addr_t dma;
	void *va;
	int ret;

	va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev->parent,
			"failed to allocate dma memory: len 0x%zx\n",
			mem->len);
		return -ENOMEM;
	}

	dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n",
		va, &dma, mem->len);

	if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
		/*
		 * Check requested da is equal to dma address
		 * and print a warning message in case of misalignment.
		 * Don't stop rproc_start sequence as coprocessor may
		 * build pa to da translation on its side.
		 */
		if (mem->da != (u32)dma)
			dev_warn(dev->parent,
				 "Allocated carveout doesn't fit device address request\n");
	}
	/*
	 * Ok, this is non-standard.
	 *
	 * Sometimes we can't rely on the generic iommu-based DMA API
	 * to dynamically allocate the device address and then set the IOMMU
	 * tables accordingly, because some remote processors might
	 * _require_ us to use hard coded device addresses that their
	 * firmware was compiled with.
	 *
	 * In this case, we must use the IOMMU API directly and map
	 * the memory to the device address as expected by the remote
	 * processor.
	 *
	 * Obviously such remote processor devices should not be configured
	 * to use the iommu-based DMA API: we expect 'dma' to contain the
	 * physical address in this case.
	 */
	if (mem->da != FW_RSC_ADDR_ANY && rproc->domain) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto dma_free;
		}

		ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
				mem->flags);
		if (ret) {
			dev_err(dev, "iommu_map failed: %d\n", ret);
			goto free_mapping;
		}

		/*
		 * We'll need this info later when we'll want to unmap
		 * everything (e.g. on shutdown).
		 *
		 * We can't trust the remote processor not to change the
		 * resource table, so we must maintain this info independently.
		 */
		mapping->da = mem->da;
		mapping->len = mem->len;
		list_add_tail(&mapping->node, &rproc->mappings);

		dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
			mem->da, &dma);
	}

	if (mem->da == FW_RSC_ADDR_ANY) {
		/* Update device address as undefined by requester */
		if ((u64)dma & HIGH_BITS_MASK)
			dev_warn(dev, "DMA address cast in 32bit to fit resource table format\n");

		mem->da = (u32)dma;
	}

	mem->dma = dma;
	mem->va = va;

	return 0;

free_mapping:
	kfree(mapping);
dma_free:
	dma_free_coherent(dev->parent, mem->len, va, dma);
	return ret;
}

/**
 * rproc_release_carveout() - release acquired carveout
 * @rproc: rproc handle
 * @mem: the memory entry to release
 *
 * This function releases the specified memory entry @mem, previously
 * allocated for @rproc by rproc_alloc_carveout().
 */
static int rproc_release_carveout(struct rproc *rproc,
				  struct rproc_mem_entry *mem)
{
	struct device *dev = &rproc->dev;

	/* clean up carveout allocations */
	dma_free_coherent(dev->parent, mem->len, mem->va, mem->dma);
	return 0;
}
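/*
 * For reference, a carveout request as it appears in a firmware resource
 * table is a struct fw_rsc_carveout (see <linux/remoteproc.h>). An
 * illustrative entry, with made-up addresses, might look like:
 *
 *	struct fw_rsc_carveout rsc = {
 *		.da = 0x78000000,	// device address wanted by the fw
 *		.pa = 0,		// filled in by the host
 *		.len = 0x100000,	// 1 MiB region
 *		.flags = 0,
 *		.name = "text",
 *	};
 *
 * rproc_handle_carveout() below consumes such entries and either matches
 * them against pre-registered carveouts (by name) or registers new ones.
 */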
/**
 * rproc_handle_carveout() - handle phys contig memory allocation requests
 * @rproc: rproc handle
 * @rsc: the resource entry
 * @offset: offset of the resource entry
 * @avail: size of available data (for image validation)
 *
 * This function will handle firmware requests for allocation of physically
 * contiguous memory regions.
 *
 * These request entries should come first in the firmware's resource table,
 * as other firmware entries might request placing other data objects inside
 * these memory regions (e.g. data/code segments, trace resource entries, ...).
 *
 * Allocating memory this way helps utilizing the reserved physical memory
 * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
 * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
 * pressure is important; it may have a substantial impact on performance.
 */
static int rproc_handle_carveout(struct rproc *rproc,
				 struct fw_rsc_carveout *rsc,
				 int offset, int avail)
{
	struct rproc_mem_entry *carveout;
	struct device *dev = &rproc->dev;

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "carveout rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "carveout rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n",
		rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);

	/*
	 * Check if the carveout rsc is already part of a registered carveout;
	 * search by name, then check the da and length.
	 */
	carveout = rproc_find_carveout_by_name(rproc, rsc->name);

	if (carveout) {
		if (carveout->rsc_offset != FW_RSC_ADDR_ANY) {
			dev_err(dev,
				"Carveout already associated to resource table\n");
			return -ENOMEM;
		}

		if (rproc_check_carveout_da(rproc, carveout, rsc->da, rsc->len))
			return -ENOMEM;

		/* Update memory carveout with resource table info */
		carveout->rsc_offset = offset;
		carveout->flags = rsc->flags;

		return 0;
	}

	/* Register carveout in list */
	carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da,
					rproc_alloc_carveout,
					rproc_release_carveout, rsc->name);
	if (!carveout) {
		dev_err(dev, "Can't allocate memory entry structure\n");
		return -ENOMEM;
	}

	carveout->flags = rsc->flags;
	carveout->rsc_offset = offset;
	rproc_add_carveout(rproc, carveout);

	return 0;
}

/**
 * rproc_add_carveout() - register an allocated carveout region
 * @rproc: rproc handle
 * @mem: memory entry to register
 *
 * This function registers the specified memory entry in the @rproc
 * carveouts list. The specified carveout should have been allocated
 * before registering.
 */
void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem)
{
	list_add_tail(&mem->node, &rproc->carveouts);
}
EXPORT_SYMBOL(rproc_add_carveout);
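/*
 * A hedged sketch of how a platform driver might pre-register a carveout
 * before boot, so that the resource table handler above finds it by name
 * instead of allocating a new one (the name "text", the addresses and the
 * my_release callback are hypothetical):
 *
 *	struct rproc_mem_entry *mem;
 *
 *	mem = rproc_mem_entry_init(dev, va, dma, 0x100000, 0x78000000,
 *				   NULL, my_release, "text");
 *	if (!mem)
 *		return -ENOMEM;
 *	rproc_add_carveout(rproc, mem);
 *
 * Passing a NULL @alloc is the "already allocated" case; drivers that want
 * lazy allocation pass an alloc callback instead, as rproc_alloc_vring()
 * does above.
 */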
/**
 * rproc_mem_entry_init() - allocate and initialize rproc_mem_entry struct
 * @dev: pointer to the device struct
 * @va: virtual address
 * @dma: dma address
 * @len: memory carveout length
 * @da: device address
 * @alloc: memory carveout allocation function
 * @release: memory carveout release function
 * @name: carveout name
 *
 * This function allocates a rproc_mem_entry struct and fills it with the
 * parameters provided by the client.
 */
struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
		     void *va, dma_addr_t dma, size_t len, u32 da,
		     int (*alloc)(struct rproc *, struct rproc_mem_entry *),
		     int (*release)(struct rproc *, struct rproc_mem_entry *),
		     const char *name, ...)
{
	struct rproc_mem_entry *mem;
	va_list args;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return mem;

	mem->va = va;
	mem->dma = dma;
	mem->da = da;
	mem->len = len;
	mem->alloc = alloc;
	mem->release = release;
	mem->rsc_offset = FW_RSC_ADDR_ANY;
	mem->of_resm_idx = -1;

	va_start(args, name);
	vsnprintf(mem->name, sizeof(mem->name), name, args);
	va_end(args);

	return mem;
}
EXPORT_SYMBOL(rproc_mem_entry_init);

/**
 * rproc_of_resm_mem_entry_init() - allocate and initialize rproc_mem_entry struct
 * from a reserved memory phandle
 * @dev: pointer to the device struct
 * @of_resm_idx: reserved memory phandle index in "memory-region"
 * @len: memory carveout length
 * @da: device address
 * @name: carveout name
 *
 * This function allocates a rproc_mem_entry struct and fills it with the
 * parameters provided by the client.
 */
struct rproc_mem_entry *
rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
			     u32 da, const char *name, ...)
{
	struct rproc_mem_entry *mem;
	va_list args;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return mem;

	mem->da = da;
	mem->len = len;
	mem->rsc_offset = FW_RSC_ADDR_ANY;
	mem->of_resm_idx = of_resm_idx;

	va_start(args, name);
	vsnprintf(mem->name, sizeof(mem->name), name, args);
	va_end(args);

	return mem;
}
EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);

/*
 * A lookup table for resource handlers. The indices are defined in
 * enum fw_resource_type.
 */
static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
	[RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
	[RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
	[RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
	[RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
};

/* handle firmware resource entries before booting the remote processor */
static int rproc_handle_resources(struct rproc *rproc,
				  rproc_handle_resource_t handlers[RSC_LAST])
{
	struct device *dev = &rproc->dev;
	rproc_handle_resource_t handler;
	int ret = 0, i;

	if (!rproc->table_ptr)
		return 0;

	for (i = 0; i < rproc->table_ptr->num; i++) {
		int offset = rproc->table_ptr->offset[i];
		struct fw_rsc_hdr *hdr = (void *)rproc->table_ptr + offset;
		int avail = rproc->table_sz - offset - sizeof(*hdr);
		void *rsc = (void *)hdr + sizeof(*hdr);

		/* make sure table isn't truncated */
		if (avail < 0) {
			dev_err(dev, "rsc table is truncated\n");
			return -EINVAL;
		}

		dev_dbg(dev, "rsc: type %d\n", hdr->type);

		if (hdr->type >= RSC_VENDOR_START &&
		    hdr->type <= RSC_VENDOR_END) {
			ret = rproc_handle_rsc(rproc, hdr->type, rsc,
					       offset + sizeof(*hdr), avail);
			if (ret == RSC_HANDLED)
				continue;
			else if (ret < 0)
				break;

			dev_warn(dev, "unsupported vendor resource %d\n",
				 hdr->type);
			continue;
		}

		if (hdr->type >= RSC_LAST) {
			dev_warn(dev, "unsupported resource %d\n", hdr->type);
			continue;
		}

		handler = handlers[hdr->type];
		if (!handler)
			continue;

		ret = handler(rproc, rsc, offset + sizeof(*hdr), avail);
		if (ret)
			break;
	}

	return ret;
}
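/*
 * For orientation, the table walked above starts with a struct
 * resource_table header followed by an offset array, one offset per
 * resource entry (see <linux/remoteproc.h>). Schematically:
 *
 *	struct resource_table {
 *		u32 ver;	// format version
 *		u32 num;	// number of entries
 *		u32 reserved[2];
 *		u32 offset[];	// byte offsets of the fw_rsc_hdr entries
 *	};
 *
 * Each offset[i] points at a struct fw_rsc_hdr { u32 type; } immediately
 * followed by the type-specific payload handed to the handlers above.
 */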
static int rproc_prepare_subdevices(struct rproc *rproc)
{
	struct rproc_subdev *subdev;
	int ret;

	list_for_each_entry(subdev, &rproc->subdevs, node) {
		if (subdev->prepare) {
			ret = subdev->prepare(subdev);
			if (ret)
				goto unroll_preparation;
		}
	}

	return 0;

unroll_preparation:
	list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->unprepare)
			subdev->unprepare(subdev);
	}

	return ret;
}

static int rproc_start_subdevices(struct rproc *rproc)
{
	struct rproc_subdev *subdev;
	int ret;

	list_for_each_entry(subdev, &rproc->subdevs, node) {
		if (subdev->start) {
			ret = subdev->start(subdev);
			if (ret)
				goto unroll_registration;
		}
	}

	return 0;

unroll_registration:
	list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->stop)
			subdev->stop(subdev, true);
	}

	return ret;
}

static void rproc_stop_subdevices(struct rproc *rproc, bool crashed)
{
	struct rproc_subdev *subdev;

	list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->stop)
			subdev->stop(subdev, crashed);
	}
}

static void rproc_unprepare_subdevices(struct rproc *rproc)
{
	struct rproc_subdev *subdev;

	list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->unprepare)
			subdev->unprepare(subdev);
	}
}

/**
 * rproc_alloc_registered_carveouts() - allocate all carveouts registered
 * in the list
 * @rproc: the remote processor handle
 *
 * This function parses the registered carveout list, performs the
 * allocation if an alloc() ops is registered, and updates the resource
 * table information if rsc_offset is set.
 *
 * Return: 0 on success
 */
static int rproc_alloc_registered_carveouts(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct fw_rsc_carveout *rsc;
	struct device *dev = &rproc->dev;
	u64 pa;
	int ret;

	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		if (entry->alloc) {
			ret = entry->alloc(rproc, entry);
			if (ret) {
				dev_err(dev, "Unable to allocate carveout %s: %d\n",
					entry->name, ret);
				return -ENOMEM;
			}
		}

		if (entry->rsc_offset != FW_RSC_ADDR_ANY) {
			/* update resource table */
			rsc = (void *)rproc->table_ptr + entry->rsc_offset;
			/*
			 * Some remote processors might need to know the pa
			 * even though they are behind an IOMMU. E.g., OMAP4's
			 * remote M3 processor needs this so it can control
			 * on-chip hardware accelerators that are not behind
			 * the IOMMU, and therefore must know the pa.
			 *
			 * Generally we don't want to expose physical addresses
			 * if we don't have to (remote processors are generally
			 * _not_ trusted), so we might want to do this only for
			 * remote processors that _must_ have this (e.g. OMAP4's
			 * dual M3 subsystem).
			 *
			 * Non-IOMMU processors might also want to have this
			 * info. In this case, the device address and the
			 * physical address are the same.
			 */

			/* Use va if defined else dma to generate pa */
			if (entry->va)
				pa = (u64)rproc_va_to_pa(entry->va);
			else
				pa = (u64)entry->dma;

			if (((u64)pa) & HIGH_BITS_MASK)
				dev_warn(dev,
					 "Physical address cast in 32bit to fit resource table format\n");

			rsc->pa = (u32)pa;
			rsc->da = entry->da;
			rsc->len = entry->len;
		}
	}

	return 0;
}

/**
 * rproc_coredump_cleanup() - clean up dump_segments list
 * @rproc: the remote processor handle
 */
static void rproc_coredump_cleanup(struct rproc *rproc)
{
	struct rproc_dump_segment *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
		list_del(&entry->node);
		kfree(entry);
	}
}

/**
 * rproc_resource_cleanup() - clean up and free all acquired resources
 * @rproc: rproc handle
 *
 * This function will free all resources acquired for @rproc, and it
 * is called whenever @rproc either shuts down or fails to boot.
 */
static void rproc_resource_cleanup(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct rproc_debug_trace *trace, *ttmp;
	struct rproc_vdev *rvdev, *rvtmp;
	struct device *dev = &rproc->dev;

	/* clean up debugfs trace entries */
	list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) {
		rproc_remove_trace_file(trace->tfile);
		rproc->num_traces--;
		list_del(&trace->node);
		kfree(trace);
	}

	/* clean up iommu mapping entries */
	list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
		size_t unmapped;

		unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
		if (unmapped != entry->len) {
			/* nothing much to do besides complaining */
			dev_err(dev, "failed to unmap %zx/%zu\n", entry->len,
				unmapped);
		}

		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up carveout allocations */
	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		if (entry->release)
			entry->release(rproc, entry);
		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up remote vdev entries */
	list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
		kref_put(&rvdev->refcount, rproc_vdev_release);

	rproc_coredump_cleanup(rproc);
}
static int rproc_start(struct rproc *rproc, const struct firmware *fw)
{
	struct resource_table *loaded_table;
	struct device *dev = &rproc->dev;
	int ret;

	/* load the ELF segments to memory */
	ret = rproc_load_segments(rproc, fw);
	if (ret) {
		dev_err(dev, "Failed to load program segments: %d\n", ret);
		return ret;
	}

	/*
	 * The starting device has been given the rproc->cached_table as the
	 * resource table. The address of the vring along with the other
	 * allocated resources (carveouts etc) is stored in cached_table.
	 * In order to pass this information to the remote device we must copy
	 * this information to device memory. We also update the table_ptr so
	 * that any subsequent changes will be applied to the loaded version.
	 */
	loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
	if (loaded_table) {
		memcpy(loaded_table, rproc->cached_table, rproc->table_sz);
		rproc->table_ptr = loaded_table;
	}

	ret = rproc_prepare_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to prepare subdevices for %s: %d\n",
			rproc->name, ret);
		goto reset_table_ptr;
	}

	/* power up the remote processor */
	ret = rproc->ops->start(rproc);
	if (ret) {
		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
		goto unprepare_subdevices;
	}

	/* Start any subdevices for the remote processor */
	ret = rproc_start_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to probe subdevices for %s: %d\n",
			rproc->name, ret);
		goto stop_rproc;
	}

	rproc->state = RPROC_RUNNING;

	dev_info(dev, "remote processor %s is now up\n", rproc->name);

	return 0;

stop_rproc:
	rproc->ops->stop(rproc);
unprepare_subdevices:
	rproc_unprepare_subdevices(rproc);
reset_table_ptr:
	rproc->table_ptr = rproc->cached_table;

	return ret;
}

/*
 * take a firmware and boot a remote processor with it.
 */
static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = &rproc->dev;
	const char *name = rproc->firmware;
	int ret;

	ret = rproc_fw_sanity_check(rproc, fw);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed: %d\n", ret);
		return ret;
	}

	dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);

	/*
	 * if enabling an IOMMU isn't relevant for this rproc, this is
	 * just a nop
	 */
	ret = rproc_enable_iommu(rproc);
	if (ret) {
		dev_err(dev, "can't enable iommu: %d\n", ret);
		goto put_pm_runtime;
	}

	/* Prepare rproc for firmware loading if needed */
	ret = rproc_prepare_device(rproc);
	if (ret) {
		dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
		goto disable_iommu;
	}

	rproc->bootaddr = rproc_get_boot_addr(rproc, fw);

	/* Load resource table, core dump segment list etc from the firmware */
	ret = rproc_parse_fw(rproc, fw);
	if (ret)
		goto unprepare_rproc;

	/* reset max_notifyid */
	rproc->max_notifyid = -1;

	/* reset handled vdev */
	rproc->nb_vdev = 0;

	/* handle fw resources which are required to boot rproc */
	ret = rproc_handle_resources(rproc, rproc_loading_handlers);
	if (ret) {
		dev_err(dev, "Failed to process resources: %d\n", ret);
		goto clean_up_resources;
	}

	/* Allocate carveout resources associated to rproc */
	ret = rproc_alloc_registered_carveouts(rproc);
	if (ret) {
		dev_err(dev, "Failed to allocate associated carveouts: %d\n",
			ret);
		goto clean_up_resources;
	}

	ret = rproc_start(rproc, fw);
	if (ret)
		goto clean_up_resources;

	return 0;

clean_up_resources:
	rproc_resource_cleanup(rproc);
	kfree(rproc->cached_table);
	rproc->cached_table = NULL;
	rproc->table_ptr = NULL;
unprepare_rproc:
	/* release HW resources if needed */
	rproc_unprepare_device(rproc);
disable_iommu:
	rproc_disable_iommu(rproc);
put_pm_runtime:
	pm_runtime_put(dev);
	return ret;
}
/*
 * take a firmware and boot it up.
 *
 * Note: this function is called asynchronously upon registration of the
 * remote processor (so we must wait until it completes before we try
 * to unregister the device. One other option is just to use kref here,
 * that might be cleaner).
 */
static void rproc_auto_boot_callback(const struct firmware *fw, void *context)
{
	struct rproc *rproc = context;

	rproc_boot(rproc);

	release_firmware(fw);
}

static int rproc_trigger_auto_boot(struct rproc *rproc)
{
	int ret;

	/*
	 * We're initiating an asynchronous firmware loading, so we can
	 * be built-in kernel code, without hanging the boot process.
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      rproc->firmware, &rproc->dev, GFP_KERNEL,
				      rproc, rproc_auto_boot_callback);
	if (ret < 0)
		dev_err(&rproc->dev, "request_firmware_nowait err: %d\n", ret);

	return ret;
}

static int rproc_stop(struct rproc *rproc, bool crashed)
{
	struct device *dev = &rproc->dev;
	int ret;

	/* Stop any subdevices for the remote processor */
	rproc_stop_subdevices(rproc, crashed);

	/* the installed resource table is no longer accessible */
	rproc->table_ptr = rproc->cached_table;

	/* power off the remote processor */
	ret = rproc->ops->stop(rproc);
	if (ret) {
		dev_err(dev, "can't stop rproc: %d\n", ret);
		return ret;
	}

	rproc_unprepare_subdevices(rproc);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

	return 0;
}

/**
 * rproc_coredump_add_segment() - add segment of device memory to coredump
 * @rproc: handle of a remote processor
 * @da: device address
 * @size: size of segment
 *
 * Add device memory to the list of segments to be included in a coredump for
 * the remoteproc.
 *
 * Return: 0 on success, negative errno on error.
 */
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
{
	struct rproc_dump_segment *segment;

	segment = kzalloc(sizeof(*segment), GFP_KERNEL);
	if (!segment)
		return -ENOMEM;

	segment->da = da;
	segment->size = size;

	list_add_tail(&segment->node, &rproc->dump_segments);

	return 0;
}
EXPORT_SYMBOL(rproc_coredump_add_segment);
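/*
 * A hedged usage sketch: a platform driver would typically register its
 * dump segments from its parse_fw hook so the list is rebuilt on every
 * boot (the address and size below are hypothetical):
 *
 *	ret = rproc_coredump_add_segment(rproc, 0x20000000, SZ_1M);
 *	if (ret)
 *		return ret;
 *
 * Segments registered this way are captured by rproc_coredump() after a
 * crash, using rproc_da_to_va() to read the memory.
 */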
/**
 * rproc_coredump_add_custom_segment() - add custom coredump segment
 * @rproc: handle of a remote processor
 * @da: device address
 * @size: size of segment
 * @dumpfn: custom dump function called for each segment during coredump
 * @priv: private data
 *
 * Add device memory to the list of segments to be included in the coredump
 * and associate the segment with the given custom dump function and private
 * data.
 *
 * Return: 0 on success, negative errno on error.
 */
int rproc_coredump_add_custom_segment(struct rproc *rproc,
				      dma_addr_t da, size_t size,
				      void (*dumpfn)(struct rproc *rproc,
						     struct rproc_dump_segment *segment,
						     void *dest),
				      void *priv)
{
	struct rproc_dump_segment *segment;

	segment = kzalloc(sizeof(*segment), GFP_KERNEL);
	if (!segment)
		return -ENOMEM;

	segment->da = da;
	segment->size = size;
	segment->priv = priv;
	segment->dump = dumpfn;

	list_add_tail(&segment->node, &rproc->dump_segments);

	return 0;
}
EXPORT_SYMBOL(rproc_coredump_add_custom_segment);

/**
 * rproc_coredump_set_elf_info() - set coredump elf information
 * @rproc: handle of a remote processor
 * @class: elf class for coredump elf file
 * @machine: elf machine for coredump elf file
 *
 * Set elf information which will be used for coredump elf file.
 *
 * Return: 0 on success, negative errno on error.
 */
int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine)
{
	if (class != ELFCLASS64 && class != ELFCLASS32)
		return -EINVAL;

	rproc->elf_class = class;
	rproc->elf_machine = machine;

	return 0;
}
EXPORT_SYMBOL(rproc_coredump_set_elf_info);

/**
 * rproc_coredump() - perform coredump
 * @rproc: rproc handle
 *
 * This function will generate an ELF header for the registered segments
 * and create a devcoredump device associated with rproc.
 */
static void rproc_coredump(struct rproc *rproc)
{
	struct rproc_dump_segment *segment;
	void *phdr;
	void *ehdr;
	size_t data_size;
	size_t offset;
	void *data;
	void *ptr;
	u8 class = rproc->elf_class;
	int phnum = 0;

	if (list_empty(&rproc->dump_segments))
		return;

	if (class == ELFCLASSNONE) {
		dev_err(&rproc->dev, "Elf class is not set\n");
		return;
	}

	data_size = elf_size_of_hdr(class);
	list_for_each_entry(segment, &rproc->dump_segments, node) {
		data_size += elf_size_of_phdr(class) + segment->size;

		phnum++;
	}

	data = vmalloc(data_size);
	if (!data)
		return;

	ehdr = data;

	memset(ehdr, 0, elf_size_of_hdr(class));
	/* e_ident field is common for both elf32 and elf64 */
	elf_hdr_init_ident(ehdr, class);

	elf_hdr_set_e_type(class, ehdr, ET_CORE);
	elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
	elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
	elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
	elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
	elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
	elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
	elf_hdr_set_e_phnum(class, ehdr, phnum);

	phdr = data + elf_hdr_get_e_phoff(class, ehdr);
	offset = elf_hdr_get_e_phoff(class, ehdr);
	offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);

	list_for_each_entry(segment, &rproc->dump_segments, node) {
		memset(phdr, 0, elf_size_of_phdr(class));
		elf_phdr_set_p_type(class, phdr, PT_LOAD);
		elf_phdr_set_p_offset(class, phdr, offset);
		elf_phdr_set_p_vaddr(class, phdr, segment->da);
		elf_phdr_set_p_paddr(class, phdr, segment->da);
		elf_phdr_set_p_filesz(class, phdr, segment->size);
		elf_phdr_set_p_memsz(class, phdr, segment->size);
		elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
		elf_phdr_set_p_align(class, phdr, 0);

		if (segment->dump) {
			segment->dump(rproc, segment, data + offset);
		} else {
			ptr = rproc_da_to_va(rproc, segment->da, segment->size);
			if (!ptr) {
				dev_err(&rproc->dev,
					"invalid coredump segment (%pad, %zu)\n",
					&segment->da, segment->size);
				memset(data + offset, 0xff, segment->size);
			} else {
				memcpy(data + offset, ptr, segment->size);
			}
		}

		offset += elf_phdr_get_p_filesz(class, phdr);
		phdr += elf_size_of_phdr(class);
	}

	dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
}

/**
 * rproc_trigger_recovery() - recover a remoteproc
 * @rproc: the remote processor
 *
 * The recovery is done by resetting all the virtio devices; that way all
 * the rpmsg drivers will be reset along with the remote processor, making
 * the remoteproc functional again.
 *
 * This function can sleep, so it cannot be called from atomic context.
 */
int rproc_trigger_recovery(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev = &rproc->dev;
	int ret;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret)
		return ret;

	/* State could have changed before we got the mutex */
	if (rproc->state != RPROC_CRASHED)
		goto unlock_mutex;

	dev_err(dev, "recovering %s\n", rproc->name);

	ret = rproc_stop(rproc, true);
	if (ret)
		goto unlock_mutex;

	/* generate coredump */
	rproc_coredump(rproc);

	/* load firmware */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto unlock_mutex;
	}

	/* boot the remote processor up again */
	ret = rproc_start(rproc, firmware_p);

	release_firmware(firmware_p);

unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}

/**
 * rproc_crash_handler_work() - handle a crash
 * @work: work treating the crash
 *
 * This function needs to handle everything related to a crash, like cpu
 * registers and stack dump, information to help to debug the fatal error, etc.
 */
static void rproc_crash_handler_work(struct work_struct *work)
{
	struct rproc *rproc = container_of(work, struct rproc, crash_handler);
	struct device *dev = &rproc->dev;

	dev_dbg(dev, "enter %s\n", __func__);

	mutex_lock(&rproc->lock);

	if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) {
		/* handle only the first crash detected */
		mutex_unlock(&rproc->lock);
		return;
	}

	rproc->state = RPROC_CRASHED;
	dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt,
		rproc->name);

	mutex_unlock(&rproc->lock);

	if (!rproc->recovery_disabled)
		rproc_trigger_recovery(rproc);

	pm_relax(rproc->dev.parent);
}
/**
 * rproc_boot() - boot a remote processor
 * @rproc: handle of a remote processor
 *
 * Boot a remote processor (i.e. load its firmware, power it on, ...).
 *
 * If the remote processor is already powered on, this function immediately
 * returns (successfully).
 *
 * Returns 0 on success, and an appropriate error value otherwise.
 */
int rproc_boot(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = &rproc->dev;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	if (rproc->state == RPROC_DELETED) {
		ret = -ENODEV;
		dev_err(dev, "can't boot deleted rproc %s\n", rproc->name);
		goto unlock_mutex;
	}

	/* skip the boot process if rproc is already powered up */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

	dev_info(dev, "powering up %s\n", rproc->name);

	/* load firmware */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto downref_rproc;
	}

	ret = rproc_fw_boot(rproc, firmware_p);

	release_firmware(firmware_p);

downref_rproc:
	if (ret)
		atomic_dec(&rproc->power);
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_boot);

/**
 * rproc_shutdown() - power off the remote processor
 * @rproc: the remote processor
 *
 * Power off a remote processor (previously booted with rproc_boot()).
 *
 * In case @rproc is still being used by additional users, then
 * this function will just decrement the power refcount and exit,
 * without really powering off the device.
 *
 * Every call to rproc_boot() must (eventually) be accompanied by a call
 * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
 *
 * Notes:
 * - we're not decrementing the rproc's refcount, only the power refcount,
 *   which means that the @rproc handle stays valid even after
 *   rproc_shutdown() returns, and users can still use it with a subsequent
 *   rproc_boot(), if needed.
 */
void rproc_shutdown(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power))
		goto out;

	ret = rproc_stop(rproc, false);
	if (ret) {
		atomic_inc(&rproc->power);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	/* release HW resources if needed */
	rproc_unprepare_device(rproc);

	rproc_disable_iommu(rproc);

	pm_runtime_put(dev);

	/* Free the copy of the resource table */
	kfree(rproc->cached_table);
	rproc->cached_table = NULL;
	rproc->table_ptr = NULL;
out:
	mutex_unlock(&rproc->lock);
}
EXPORT_SYMBOL(rproc_shutdown);
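/*
 * A hedged sketch of the boot/shutdown pairing from a client driver's
 * perspective (error handling trimmed; the phandle lookup assumes
 * CONFIG_OF and uses rproc_get_by_phandle() defined below):
 *
 *	struct rproc *rproc = rproc_get_by_phandle(phandle);
 *
 *	if (!rproc)
 *		return -EPROBE_DEFER;
 *
 *	ret = rproc_boot(rproc);	// takes a power reference
 *	...
 *	rproc_shutdown(rproc);		// drops it; last user powers off
 *	rproc_put(rproc);		// drop the handle reference
 *
 * Note that rproc_put() pairs with rproc_get_by_phandle(), while
 * rproc_shutdown() pairs with rproc_boot().
 */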
 */
#ifdef CONFIG_OF
struct rproc *rproc_get_by_phandle(phandle phandle)
{
	struct rproc *rproc = NULL, *r;
	struct device_node *np;

	np = of_find_node_by_phandle(phandle);
	if (!np)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(r, &rproc_list, node) {
		if (r->dev.parent && r->dev.parent->of_node == np) {
			/* prevent underlying implementation from being removed */
			if (!try_module_get(r->dev.parent->driver->owner)) {
				dev_err(&r->dev, "can't get owner\n");
				break;
			}

			rproc = r;
			get_device(&rproc->dev);
			break;
		}
	}
	rcu_read_unlock();

	of_node_put(np);

	return rproc;
}
#else
struct rproc *rproc_get_by_phandle(phandle phandle)
{
	return NULL;
}
#endif
EXPORT_SYMBOL(rproc_get_by_phandle);

/**
 * rproc_add() - register a remote processor
 * @rproc: the remote processor handle to register
 *
 * Registers @rproc with the remoteproc framework, after it has been
 * allocated with rproc_alloc().
 *
 * This is called by the platform-specific rproc implementation, whenever
 * a new remote processor device is probed.
 *
 * Returns 0 on success and an appropriate error code otherwise.
 *
 * Note: this function initiates an asynchronous firmware loading
 * context, which will look for virtio devices supported by the rproc's
 * firmware.
 *
 * If found, those virtio devices will be created and added, so as a result
 * of registering this remote processor, additional virtio drivers might be
 * probed.
 */
int rproc_add(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = device_add(dev);
	if (ret < 0)
		return ret;

	dev_info(dev, "%s is available\n", rproc->name);

	/* create debugfs entries */
	rproc_create_debug_dir(rproc);

	/* if rproc is marked always-on, request it to boot */
	if (rproc->auto_boot) {
		ret = rproc_trigger_auto_boot(rproc);
		if (ret < 0)
			return ret;
	}

	/* expose to rproc_get_by_phandle users */
	mutex_lock(&rproc_list_mutex);
	list_add_rcu(&rproc->node, &rproc_list);
	mutex_unlock(&rproc_list_mutex);

	return 0;
}
EXPORT_SYMBOL(rproc_add);

static void devm_rproc_remove(void *rproc)
{
	rproc_del(rproc);
}

/**
 * devm_rproc_add() - resource managed rproc_add()
 * @dev: the underlying device
 * @rproc: the remote processor handle to register
 *
 * This function performs like rproc_add() but the registered rproc device
 * will automatically be removed on driver detach.
 *
 * Returns: 0 on success, negative errno on failure
 */
int devm_rproc_add(struct device *dev, struct rproc *rproc)
{
	int err;

	err = rproc_add(rproc);
	if (err)
		return err;

	return devm_add_action_or_reset(dev, devm_rproc_remove, rproc);
}
EXPORT_SYMBOL(devm_rproc_add);

/**
 * rproc_type_release() - release a remote processor instance
 * @dev: the rproc's device
 *
 * This function should _never_ be called directly.
 *
 * It will be called by the driver core when no one holds a valid pointer
 * to @dev anymore.
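 *
 * For instance, in a devres-managed sequence (a sketch; "my_ops" is a
 * hypothetical struct rproc_ops), this release callback eventually runs
 * on driver detach, once the last reference to the device is dropped:
 *
 *	rproc = devm_rproc_alloc(dev, "my-rproc", &my_ops, NULL, 0);
 *	if (!rproc)
 *		return -ENOMEM;
 *
 *	ret = devm_rproc_add(dev, rproc);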
 */
static void rproc_type_release(struct device *dev)
{
	struct rproc *rproc = container_of(dev, struct rproc, dev);

	dev_info(&rproc->dev, "releasing %s\n", rproc->name);

	idr_destroy(&rproc->notifyids);

	if (rproc->index >= 0)
		ida_simple_remove(&rproc_dev_index, rproc->index);

	kfree_const(rproc->firmware);
	kfree_const(rproc->name);
	kfree(rproc->ops);
	kfree(rproc);
}

static const struct device_type rproc_type = {
	.name = "remoteproc",
	.release = rproc_type_release,
};

static int rproc_alloc_firmware(struct rproc *rproc,
				const char *name, const char *firmware)
{
	const char *p;

	/*
	 * Duplicate the firmware name if the caller gave us one to work
	 * with. Otherwise construct a new one using a default pattern.
	 */
	if (firmware)
		p = kstrdup_const(firmware, GFP_KERNEL);
	else
		p = kasprintf(GFP_KERNEL, "rproc-%s-fw", name);

	if (!p)
		return -ENOMEM;

	rproc->firmware = p;

	return 0;
}

static int rproc_alloc_ops(struct rproc *rproc, const struct rproc_ops *ops)
{
	rproc->ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
	if (!rproc->ops)
		return -ENOMEM;

	if (rproc->ops->load)
		return 0;

	/* Default to ELF loader if no load function is specified */
	rproc->ops->load = rproc_elf_load_segments;
	rproc->ops->parse_fw = rproc_elf_load_rsc_table;
	rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table;
	rproc->ops->sanity_check = rproc_elf_sanity_check;
	rproc->ops->get_boot_addr = rproc_elf_get_boot_addr;

	return 0;
}

/**
 * rproc_alloc() - allocate a remote processor handle
 * @dev: the underlying device
 * @name: name of this remote processor
 * @ops: platform-specific handlers (mainly start/stop)
 * @firmware: name of firmware file to load, can be NULL
 * @len: length of private data needed by the rproc driver (in bytes)
 *
 * Allocates a new remote processor handle, but does not register
 * it yet. If @firmware is NULL, a default name is used.
 *
 * This function should be used by rproc implementations during initialization
 * of the remote processor.
 *
 * After creating an rproc handle using this function, and when ready,
 * implementations should then call rproc_add() to complete
 * the registration of the remote processor.
 *
 * On success the new rproc is returned, and on failure, NULL.
 *
 * Note: _never_ directly deallocate @rproc, even if it was not registered
 * yet. Instead, when you need to unroll rproc_alloc(), use rproc_free().
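 *
 * A typical allocation/registration sketch in a platform driver's probe
 * routine ("my_rproc_ops", the firmware name and the private data type
 * are hypothetical):
 *
 *	rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev),
 *			    &my_rproc_ops, "my-fw.elf",
 *			    sizeof(struct my_rproc_priv));
 *	if (!rproc)
 *		return -ENOMEM;
 *
 *	ret = rproc_add(rproc);
 *	if (ret) {
 *		rproc_free(rproc);
 *		return ret;
 *	}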
 */
struct rproc *rproc_alloc(struct device *dev, const char *name,
			  const struct rproc_ops *ops,
			  const char *firmware, int len)
{
	struct rproc *rproc;

	if (!dev || !name || !ops)
		return NULL;

	rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
	if (!rproc)
		return NULL;

	rproc->priv = &rproc[1];
	rproc->auto_boot = true;
	rproc->elf_class = ELFCLASSNONE;
	rproc->elf_machine = EM_NONE;

	device_initialize(&rproc->dev);
	rproc->dev.parent = dev;
	rproc->dev.type = &rproc_type;
	rproc->dev.class = &rproc_class;
	rproc->dev.driver_data = rproc;
	idr_init(&rproc->notifyids);

	rproc->name = kstrdup_const(name, GFP_KERNEL);
	if (!rproc->name)
		goto put_device;

	if (rproc_alloc_firmware(rproc, name, firmware))
		goto put_device;

	if (rproc_alloc_ops(rproc, ops))
		goto put_device;

	/* Assign a unique device index and name */
	rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
	if (rproc->index < 0) {
		dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
		goto put_device;
	}

	dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);

	atomic_set(&rproc->power, 0);

	mutex_init(&rproc->lock);

	INIT_LIST_HEAD(&rproc->carveouts);
	INIT_LIST_HEAD(&rproc->mappings);
	INIT_LIST_HEAD(&rproc->traces);
	INIT_LIST_HEAD(&rproc->rvdevs);
	INIT_LIST_HEAD(&rproc->subdevs);
	INIT_LIST_HEAD(&rproc->dump_segments);

	INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work);

	rproc->state = RPROC_OFFLINE;

	pm_runtime_no_callbacks(&rproc->dev);
	pm_runtime_enable(&rproc->dev);

	return rproc;

put_device:
	put_device(&rproc->dev);
	return NULL;
}
EXPORT_SYMBOL(rproc_alloc);

/**
 * rproc_free() - unroll rproc_alloc()
 * @rproc: the remote processor handle
 *
 * This function decrements the rproc dev refcount.
 *
 * If no one holds any reference to the rproc anymore, its refcount will
 * drop to zero and it will be freed.
 */
void rproc_free(struct rproc *rproc)
{
	pm_runtime_disable(&rproc->dev);
	put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_free);

/**
 * rproc_put() - release rproc reference
 * @rproc: the remote processor handle
 *
 * This function releases the module reference taken by
 * rproc_get_by_phandle() and decrements the rproc dev refcount.
 *
 * If no one holds any reference to the rproc anymore, its refcount will
 * drop to zero and it will be freed.
 */
void rproc_put(struct rproc *rproc)
{
	module_put(rproc->dev.parent->driver->owner);
	put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_put);

/**
 * rproc_del() - unregister a remote processor
 * @rproc: rproc handle to unregister
 *
 * This function should be called when the platform-specific rproc
 * implementation decides to remove the rproc device. It should
 * _only_ be called if a previous invocation of rproc_add()
 * has completed successfully.
 *
 * After rproc_del() returns, @rproc isn't freed yet, because
 * of the outstanding reference created by rproc_alloc(). To decrement
 * that one last refcount, one still needs to call rproc_free().
 *
 * Returns 0 on success and -EINVAL if @rproc isn't valid.
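 *
 * A removal sketch, mirroring the rproc_alloc()/rproc_add() pair shown in
 * the rproc_alloc() example above:
 *
 *	rproc_del(rproc);
 *	rproc_free(rproc);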
 */
int rproc_del(struct rproc *rproc)
{
	if (!rproc)
		return -EINVAL;

	/* if rproc is marked always-on, rproc_add() booted it */
	/* TODO: make sure this works with rproc->power > 1 */
	if (rproc->auto_boot)
		rproc_shutdown(rproc);

	mutex_lock(&rproc->lock);
	rproc->state = RPROC_DELETED;
	mutex_unlock(&rproc->lock);

	rproc_delete_debug_dir(rproc);

	/* remove the rproc from the list exposed to rproc_get_by_phandle() */
	mutex_lock(&rproc_list_mutex);
	list_del_rcu(&rproc->node);
	mutex_unlock(&rproc_list_mutex);

	/* Ensure that no readers of rproc_list are still active */
	synchronize_rcu();

	device_del(&rproc->dev);

	return 0;
}
EXPORT_SYMBOL(rproc_del);

static void devm_rproc_free(struct device *dev, void *res)
{
	rproc_free(*(struct rproc **)res);
}

/**
 * devm_rproc_alloc() - resource managed rproc_alloc()
 * @dev: the underlying device
 * @name: name of this remote processor
 * @ops: platform-specific handlers (mainly start/stop)
 * @firmware: name of firmware file to load, can be NULL
 * @len: length of private data needed by the rproc driver (in bytes)
 *
 * This function performs like rproc_alloc() but the acquired rproc device
 * will automatically be released on driver detach.
 *
 * Returns: new rproc instance, or NULL on failure
 */
struct rproc *devm_rproc_alloc(struct device *dev, const char *name,
			       const struct rproc_ops *ops,
			       const char *firmware, int len)
{
	struct rproc **ptr, *rproc;

	ptr = devres_alloc(devm_rproc_free, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	rproc = rproc_alloc(dev, name, ops, firmware, len);
	if (rproc) {
		*ptr = rproc;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return rproc;
}
EXPORT_SYMBOL(devm_rproc_alloc);

/**
 * rproc_add_subdev() - add a subdevice to a remoteproc
 * @rproc: rproc handle to add the subdevice to
 * @subdev: subdev handle to register
 *
 * Caller is responsible for populating optional subdevice function pointers.
 */
void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
{
	list_add_tail(&subdev->node, &rproc->subdevs);
}
EXPORT_SYMBOL(rproc_add_subdev);

/**
 * rproc_remove_subdev() - remove a subdevice from a remoteproc
 * @rproc: rproc handle to remove the subdevice from
 * @subdev: subdev handle, previously registered with rproc_add_subdev()
 */
void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
{
	list_del(&subdev->node);
}
EXPORT_SYMBOL(rproc_remove_subdev);

/**
 * rproc_get_by_child() - acquire rproc handle of @dev's ancestor
 * @dev: child device to find ancestor of
 *
 * Returns the ancestor rproc instance, or NULL if not found.
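 *
 * A usage sketch from code servicing a child device (e.g. a virtio device
 * hanging off the rproc); note that, unlike rproc_get_by_phandle(), no
 * reference is taken here:
 *
 *	rproc = rproc_get_by_child(&vdev->dev);
 *	if (!rproc)
 *		return -ENODEV;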
 */
struct rproc *rproc_get_by_child(struct device *dev)
{
	for (dev = dev->parent; dev; dev = dev->parent) {
		if (dev->type == &rproc_type)
			return dev->driver_data;
	}

	return NULL;
}
EXPORT_SYMBOL(rproc_get_by_child);

/**
 * rproc_report_crash() - rproc crash reporter function
 * @rproc: remote processor
 * @type: crash type
 *
 * This function must be called every time a crash is detected by the
 * low-level drivers implementing a specific remoteproc. It should not be
 * called from a non-remoteproc driver.
 *
 * This function can be called from atomic/interrupt context.
 */
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
{
	if (!rproc) {
		pr_err("NULL rproc pointer\n");
		return;
	}

	/* Prevent suspend while the remoteproc is being recovered */
	pm_stay_awake(rproc->dev.parent);

	dev_err(&rproc->dev, "crash detected in %s: type %s\n",
		rproc->name, rproc_crash_to_string(type));

	/* defer handling the error to the dedicated crash handler work */
	schedule_work(&rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);

static int rproc_panic_handler(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	unsigned int longest = 0;
	struct rproc *rproc;
	unsigned int d;

	rcu_read_lock();
	list_for_each_entry_rcu(rproc, &rproc_list, node) {
		if (!rproc->ops->panic || rproc->state != RPROC_RUNNING)
			continue;

		d = rproc->ops->panic(rproc);
		longest = max(longest, d);
	}
	rcu_read_unlock();

	/*
	 * Delay for the longest requested duration before returning. This can
	 * be used by the remoteproc drivers to give the remote processor time
	 * to perform any requested operations (such as flushing caches), when
	 * it's not possible to signal the Linux side due to the panic.
	 */
	mdelay(longest);

	return NOTIFY_DONE;
}

static void __init rproc_init_panic(void)
{
	rproc_panic_nb.notifier_call = rproc_panic_handler;
	atomic_notifier_chain_register(&panic_notifier_list, &rproc_panic_nb);
}

static void __exit rproc_exit_panic(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &rproc_panic_nb);
}

static int __init remoteproc_init(void)
{
	rproc_init_sysfs();
	rproc_init_debugfs();
	rproc_init_panic();

	return 0;
}
subsys_initcall(remoteproc_init);

static void __exit remoteproc_exit(void)
{
	ida_destroy(&rproc_dev_index);

	rproc_exit_panic();
	rproc_exit_debugfs();
	rproc_exit_sysfs();
}
module_exit(remoteproc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Remote Processor Framework");
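
/*
 * Usage note (a sketch, with hypothetical names): low-level remoteproc
 * drivers typically call rproc_report_crash() from a watchdog or fault
 * interrupt handler, which is safe since the function may run in atomic
 * context:
 *
 *	static irqreturn_t my_wdt_isr(int irq, void *data)
 *	{
 *		struct rproc *rproc = data;
 *
 *		rproc_report_crash(rproc, RPROC_WATCHDOG);
 *		return IRQ_HANDLED;
 *	}
 */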