1 /* 2 * Remote Processor Framework 3 * 4 * Copyright (C) 2011 Texas Instruments, Inc. 5 * Copyright (C) 2011 Google, Inc. 6 * 7 * Ohad Ben-Cohen <ohad@wizery.com> 8 * Brian Swetland <swetland@google.com> 9 * Mark Grosen <mgrosen@ti.com> 10 * Fernando Guzman Lugo <fernando.lugo@ti.com> 11 * Suman Anna <s-anna@ti.com> 12 * Robert Tivy <rtivy@ti.com> 13 * Armando Uribe De Leon <x0095078@ti.com> 14 * 15 * This program is free software; you can redistribute it and/or 16 * modify it under the terms of the GNU General Public License 17 * version 2 as published by the Free Software Foundation. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 */ 24 25 #define pr_fmt(fmt) "%s: " fmt, __func__ 26 27 #include <linux/kernel.h> 28 #include <linux/module.h> 29 #include <linux/device.h> 30 #include <linux/slab.h> 31 #include <linux/mutex.h> 32 #include <linux/dma-mapping.h> 33 #include <linux/firmware.h> 34 #include <linux/string.h> 35 #include <linux/debugfs.h> 36 #include <linux/remoteproc.h> 37 #include <linux/iommu.h> 38 #include <linux/klist.h> 39 #include <linux/elf.h> 40 #include <linux/virtio_ids.h> 41 #include <linux/virtio_ring.h> 42 #include <asm/byteorder.h> 43 44 #include "remoteproc_internal.h" 45 46 static void klist_rproc_get(struct klist_node *n); 47 static void klist_rproc_put(struct klist_node *n); 48 49 /* 50 * klist of the available remote processors. 51 * 52 * We need this in order to support name-based lookups (needed by the 53 * rproc_get_by_name()). 54 * 55 * That said, we don't use rproc_get_by_name() at this point. 56 * The use cases that do require its existence should be 57 * scrutinized, and hopefully migrated to rproc_boot() using device-based 58 * binding. 
59 * 60 * If/when this materializes, we could drop the klist (and the by_name 61 * API). 62 */ 63 static DEFINE_KLIST(rprocs, klist_rproc_get, klist_rproc_put); 64 65 typedef int (*rproc_handle_resources_t)(struct rproc *rproc, 66 struct resource_table *table, int len); 67 typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail); 68 69 /* 70 * This is the IOMMU fault handler we register with the IOMMU API 71 * (when relevant; not all remote processors access memory through 72 * an IOMMU). 73 * 74 * IOMMU core will invoke this handler whenever the remote processor 75 * will try to access an unmapped device address. 76 * 77 * Currently this is mostly a stub, but it will be later used to trigger 78 * the recovery of the remote processor. 79 */ 80 static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev, 81 unsigned long iova, int flags) 82 { 83 dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags); 84 85 /* 86 * Let the iommu core know we're not really handling this fault; 87 * we just plan to use this as a recovery trigger. 88 */ 89 return -ENOSYS; 90 } 91 92 static int rproc_enable_iommu(struct rproc *rproc) 93 { 94 struct iommu_domain *domain; 95 struct device *dev = rproc->dev; 96 int ret; 97 98 /* 99 * We currently use iommu_present() to decide if an IOMMU 100 * setup is needed. 101 * 102 * This works for simple cases, but will easily fail with 103 * platforms that do have an IOMMU, but not for this specific 104 * rproc. 105 * 106 * This will be easily solved by introducing hw capabilities 107 * that will be set by the remoteproc driver. 
108 */ 109 if (!iommu_present(dev->bus)) { 110 dev_dbg(dev, "iommu not found\n"); 111 return 0; 112 } 113 114 domain = iommu_domain_alloc(dev->bus); 115 if (!domain) { 116 dev_err(dev, "can't alloc iommu domain\n"); 117 return -ENOMEM; 118 } 119 120 iommu_set_fault_handler(domain, rproc_iommu_fault); 121 122 ret = iommu_attach_device(domain, dev); 123 if (ret) { 124 dev_err(dev, "can't attach iommu device: %d\n", ret); 125 goto free_domain; 126 } 127 128 rproc->domain = domain; 129 130 return 0; 131 132 free_domain: 133 iommu_domain_free(domain); 134 return ret; 135 } 136 137 static void rproc_disable_iommu(struct rproc *rproc) 138 { 139 struct iommu_domain *domain = rproc->domain; 140 struct device *dev = rproc->dev; 141 142 if (!domain) 143 return; 144 145 iommu_detach_device(domain, dev); 146 iommu_domain_free(domain); 147 148 return; 149 } 150 151 /* 152 * Some remote processors will ask us to allocate them physically contiguous 153 * memory regions (which we call "carveouts"), and map them to specific 154 * device addresses (which are hardcoded in the firmware). 155 * 156 * They may then ask us to copy objects into specific device addresses (e.g. 157 * code/data sections) or expose us certain symbols in other device address 158 * (e.g. their trace buffer). 159 * 160 * This function is an internal helper with which we can go over the allocated 161 * carveouts and translate specific device address to kernel virtual addresses 162 * so we can access the referenced memory. 163 * 164 * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too, 165 * but only on kernel direct mapped RAM memory. Instead, we're just using 166 * here the output of the DMA API, which should be more correct. 
167 */ 168 static void *rproc_da_to_va(struct rproc *rproc, u64 da, int len) 169 { 170 struct rproc_mem_entry *carveout; 171 void *ptr = NULL; 172 173 list_for_each_entry(carveout, &rproc->carveouts, node) { 174 int offset = da - carveout->da; 175 176 /* try next carveout if da is too small */ 177 if (offset < 0) 178 continue; 179 180 /* try next carveout if da is too large */ 181 if (offset + len > carveout->len) 182 continue; 183 184 ptr = carveout->va + offset; 185 186 break; 187 } 188 189 return ptr; 190 } 191 192 /** 193 * rproc_load_segments() - load firmware segments to memory 194 * @rproc: remote processor which will be booted using these fw segments 195 * @elf_data: the content of the ELF firmware image 196 * @len: firmware size (in bytes) 197 * 198 * This function loads the firmware segments to memory, where the remote 199 * processor expects them. 200 * 201 * Some remote processors will expect their code and data to be placed 202 * in specific device addresses, and can't have them dynamically assigned. 203 * 204 * We currently support only those kind of remote processors, and expect 205 * the program header's paddr member to contain those addresses. We then go 206 * through the physically contiguous "carveout" memory regions which we 207 * allocated (and mapped) earlier on behalf of the remote processor, 208 * and "translate" device address to kernel addresses, so we can copy the 209 * segments where they are expected. 210 * 211 * Currently we only support remote processors that required carveout 212 * allocations and got them mapped onto their iommus. Some processors 213 * might be different: they might not have iommus, and would prefer to 214 * directly allocate memory for every segment/resource. This is not yet 215 * supported, though. 
216 */ 217 static int 218 rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len) 219 { 220 struct device *dev = rproc->dev; 221 struct elf32_hdr *ehdr; 222 struct elf32_phdr *phdr; 223 int i, ret = 0; 224 225 ehdr = (struct elf32_hdr *)elf_data; 226 phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff); 227 228 /* go through the available ELF segments */ 229 for (i = 0; i < ehdr->e_phnum; i++, phdr++) { 230 u32 da = phdr->p_paddr; 231 u32 memsz = phdr->p_memsz; 232 u32 filesz = phdr->p_filesz; 233 u32 offset = phdr->p_offset; 234 void *ptr; 235 236 if (phdr->p_type != PT_LOAD) 237 continue; 238 239 dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n", 240 phdr->p_type, da, memsz, filesz); 241 242 if (filesz > memsz) { 243 dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n", 244 filesz, memsz); 245 ret = -EINVAL; 246 break; 247 } 248 249 if (offset + filesz > len) { 250 dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n", 251 offset + filesz, len); 252 ret = -EINVAL; 253 break; 254 } 255 256 /* grab the kernel address for this device address */ 257 ptr = rproc_da_to_va(rproc, da, memsz); 258 if (!ptr) { 259 dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz); 260 ret = -EINVAL; 261 break; 262 } 263 264 /* put the segment where the remote processor expects it */ 265 if (phdr->p_filesz) 266 memcpy(ptr, elf_data + phdr->p_offset, filesz); 267 268 /* 269 * Zero out remaining memory for this segment. 270 * 271 * This isn't strictly required since dma_alloc_coherent already 272 * did this for us. albeit harmless, we may consider removing 273 * this. 
274 */ 275 if (memsz > filesz) 276 memset(ptr + filesz, 0, memsz - filesz); 277 } 278 279 return ret; 280 } 281 282 static int 283 __rproc_handle_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i) 284 { 285 struct rproc *rproc = rvdev->rproc; 286 struct device *dev = rproc->dev; 287 struct fw_rsc_vdev_vring *vring = &rsc->vring[i]; 288 dma_addr_t dma; 289 void *va; 290 int ret, size, notifyid; 291 292 dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n", 293 i, vring->da, vring->num, vring->align); 294 295 /* make sure reserved bytes are zeroes */ 296 if (vring->reserved) { 297 dev_err(dev, "vring rsc has non zero reserved bytes\n"); 298 return -EINVAL; 299 } 300 301 /* verify queue size and vring alignment are sane */ 302 if (!vring->num || !vring->align) { 303 dev_err(dev, "invalid qsz (%d) or alignment (%d)\n", 304 vring->num, vring->align); 305 return -EINVAL; 306 } 307 308 /* actual size of vring (in bytes) */ 309 size = PAGE_ALIGN(vring_size(vring->num, vring->align)); 310 311 if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) { 312 dev_err(dev, "idr_pre_get failed\n"); 313 return -ENOMEM; 314 } 315 316 /* 317 * Allocate non-cacheable memory for the vring. 
In the future 318 * this call will also configure the IOMMU for us 319 */ 320 va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL); 321 if (!va) { 322 dev_err(dev, "dma_alloc_coherent failed\n"); 323 return -EINVAL; 324 } 325 326 /* assign an rproc-wide unique index for this vring */ 327 /* TODO: assign a notifyid for rvdev updates as well */ 328 ret = idr_get_new(&rproc->notifyids, &rvdev->vring[i], ¬ifyid); 329 if (ret) { 330 dev_err(dev, "idr_get_new failed: %d\n", ret); 331 dma_free_coherent(dev, size, va, dma); 332 return ret; 333 } 334 335 /* let the rproc know the da and notifyid of this vring */ 336 /* TODO: expose this to remote processor */ 337 vring->da = dma; 338 vring->notifyid = notifyid; 339 340 dev_dbg(dev, "vring%d: va %p dma %x size %x idr %d\n", i, va, 341 dma, size, notifyid); 342 343 rvdev->vring[i].len = vring->num; 344 rvdev->vring[i].align = vring->align; 345 rvdev->vring[i].va = va; 346 rvdev->vring[i].dma = dma; 347 rvdev->vring[i].notifyid = notifyid; 348 rvdev->vring[i].rvdev = rvdev; 349 350 return 0; 351 } 352 353 static void __rproc_free_vrings(struct rproc_vdev *rvdev, int i) 354 { 355 struct rproc *rproc = rvdev->rproc; 356 357 for (i--; i >= 0; i--) { 358 struct rproc_vring *rvring = &rvdev->vring[i]; 359 int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align)); 360 361 dma_free_coherent(rproc->dev, size, rvring->va, rvring->dma); 362 idr_remove(&rproc->notifyids, rvring->notifyid); 363 } 364 } 365 366 /** 367 * rproc_handle_vdev() - handle a vdev fw resource 368 * @rproc: the remote processor 369 * @rsc: the vring resource descriptor 370 * @avail: size of available data (for sanity checking the image) 371 * 372 * This resource entry requests the host to statically register a virtio 373 * device (vdev), and setup everything needed to support it. It contains 374 * everything needed to make it possible: the virtio device id, virtio 375 * device features, vrings information, virtio config space, etc... 
376 * 377 * Before registering the vdev, the vrings are allocated from non-cacheable 378 * physically contiguous memory. Currently we only support two vrings per 379 * remote processor (temporary limitation). We might also want to consider 380 * doing the vring allocation only later when ->find_vqs() is invoked, and 381 * then release them upon ->del_vqs(). 382 * 383 * Note: @da is currently not really handled correctly: we dynamically 384 * allocate it using the DMA API, ignoring requested hard coded addresses, 385 * and we don't take care of any required IOMMU programming. This is all 386 * going to be taken care of when the generic iommu-based DMA API will be 387 * merged. Meanwhile, statically-addressed iommu-based firmware images should 388 * use RSC_DEVMEM resource entries to map their required @da to the physical 389 * address of their base CMA region (ouch, hacky!). 390 * 391 * Returns 0 on success, or an appropriate error code otherwise 392 */ 393 static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, 394 int avail) 395 { 396 struct device *dev = rproc->dev; 397 struct rproc_vdev *rvdev; 398 int i, ret; 399 400 /* make sure resource isn't truncated */ 401 if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) 402 + rsc->config_len > avail) { 403 dev_err(rproc->dev, "vdev rsc is truncated\n"); 404 return -EINVAL; 405 } 406 407 /* make sure reserved bytes are zeroes */ 408 if (rsc->reserved[0] || rsc->reserved[1]) { 409 dev_err(dev, "vdev rsc has non zero reserved bytes\n"); 410 return -EINVAL; 411 } 412 413 dev_dbg(dev, "vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n", 414 rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings); 415 416 /* we currently support only two vrings per rvdev */ 417 if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) { 418 dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings); 419 return -EINVAL; 420 } 421 422 rvdev = kzalloc(sizeof(struct rproc_vdev), GFP_KERNEL); 423 if 
(!rvdev) 424 return -ENOMEM; 425 426 rvdev->rproc = rproc; 427 428 /* allocate the vrings */ 429 for (i = 0; i < rsc->num_of_vrings; i++) { 430 ret = __rproc_handle_vring(rvdev, rsc, i); 431 if (ret) 432 goto free_vrings; 433 } 434 435 /* remember the device features */ 436 rvdev->dfeatures = rsc->dfeatures; 437 438 list_add_tail(&rvdev->node, &rproc->rvdevs); 439 440 /* it is now safe to add the virtio device */ 441 ret = rproc_add_virtio_dev(rvdev, rsc->id); 442 if (ret) 443 goto free_vrings; 444 445 return 0; 446 447 free_vrings: 448 __rproc_free_vrings(rvdev, i); 449 kfree(rvdev); 450 return ret; 451 } 452 453 /** 454 * rproc_handle_trace() - handle a shared trace buffer resource 455 * @rproc: the remote processor 456 * @rsc: the trace resource descriptor 457 * @avail: size of available data (for sanity checking the image) 458 * 459 * In case the remote processor dumps trace logs into memory, 460 * export it via debugfs. 461 * 462 * Currently, the 'da' member of @rsc should contain the device address 463 * where the remote processor is dumping the traces. Later we could also 464 * support dynamically allocating this address using the generic 465 * DMA API (but currently there isn't a use case for that). 466 * 467 * Returns 0 on success, or an appropriate error code otherwise 468 */ 469 static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, 470 int avail) 471 { 472 struct rproc_mem_entry *trace; 473 struct device *dev = rproc->dev; 474 void *ptr; 475 char name[15]; 476 477 if (sizeof(*rsc) > avail) { 478 dev_err(rproc->dev, "trace rsc is truncated\n"); 479 return -EINVAL; 480 } 481 482 /* make sure reserved bytes are zeroes */ 483 if (rsc->reserved) { 484 dev_err(dev, "trace rsc has non zero reserved bytes\n"); 485 return -EINVAL; 486 } 487 488 /* what's the kernel address of this resource ? 
*/ 489 ptr = rproc_da_to_va(rproc, rsc->da, rsc->len); 490 if (!ptr) { 491 dev_err(dev, "erroneous trace resource entry\n"); 492 return -EINVAL; 493 } 494 495 trace = kzalloc(sizeof(*trace), GFP_KERNEL); 496 if (!trace) { 497 dev_err(dev, "kzalloc trace failed\n"); 498 return -ENOMEM; 499 } 500 501 /* set the trace buffer dma properties */ 502 trace->len = rsc->len; 503 trace->va = ptr; 504 505 /* make sure snprintf always null terminates, even if truncating */ 506 snprintf(name, sizeof(name), "trace%d", rproc->num_traces); 507 508 /* create the debugfs entry */ 509 trace->priv = rproc_create_trace_file(name, rproc, trace); 510 if (!trace->priv) { 511 trace->va = NULL; 512 kfree(trace); 513 return -EINVAL; 514 } 515 516 list_add_tail(&trace->node, &rproc->traces); 517 518 rproc->num_traces++; 519 520 dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n", name, ptr, 521 rsc->da, rsc->len); 522 523 return 0; 524 } 525 526 /** 527 * rproc_handle_devmem() - handle devmem resource entry 528 * @rproc: remote processor handle 529 * @rsc: the devmem resource entry 530 * @avail: size of available data (for sanity checking the image) 531 * 532 * Remote processors commonly need to access certain on-chip peripherals. 533 * 534 * Some of these remote processors access memory via an iommu device, 535 * and might require us to configure their iommu before they can access 536 * the on-chip peripherals they need. 537 * 538 * This resource entry is a request to map such a peripheral device. 539 * 540 * These devmem entries will contain the physical address of the device in 541 * the 'pa' member. If a specific device address is expected, then 'da' will 542 * contain it (currently this is the only use case supported). 'len' will 543 * contain the size of the physical region we need to map. 
544 * 545 * Currently we just "trust" those devmem entries to contain valid physical 546 * addresses, but this is going to change: we want the implementations to 547 * tell us ranges of physical addresses the firmware is allowed to request, 548 * and not allow firmwares to request access to physical addresses that 549 * are outside those ranges. 550 */ 551 static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc, 552 int avail) 553 { 554 struct rproc_mem_entry *mapping; 555 int ret; 556 557 /* no point in handling this resource without a valid iommu domain */ 558 if (!rproc->domain) 559 return -EINVAL; 560 561 if (sizeof(*rsc) > avail) { 562 dev_err(rproc->dev, "devmem rsc is truncated\n"); 563 return -EINVAL; 564 } 565 566 /* make sure reserved bytes are zeroes */ 567 if (rsc->reserved) { 568 dev_err(rproc->dev, "devmem rsc has non zero reserved bytes\n"); 569 return -EINVAL; 570 } 571 572 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); 573 if (!mapping) { 574 dev_err(rproc->dev, "kzalloc mapping failed\n"); 575 return -ENOMEM; 576 } 577 578 ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags); 579 if (ret) { 580 dev_err(rproc->dev, "failed to map devmem: %d\n", ret); 581 goto out; 582 } 583 584 /* 585 * We'll need this info later when we'll want to unmap everything 586 * (e.g. on shutdown). 587 * 588 * We can't trust the remote processor not to change the resource 589 * table, so we must maintain this info independently. 
590 */ 591 mapping->da = rsc->da; 592 mapping->len = rsc->len; 593 list_add_tail(&mapping->node, &rproc->mappings); 594 595 dev_dbg(rproc->dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n", 596 rsc->pa, rsc->da, rsc->len); 597 598 return 0; 599 600 out: 601 kfree(mapping); 602 return ret; 603 } 604 605 /** 606 * rproc_handle_carveout() - handle phys contig memory allocation requests 607 * @rproc: rproc handle 608 * @rsc: the resource entry 609 * @avail: size of available data (for image validation) 610 * 611 * This function will handle firmware requests for allocation of physically 612 * contiguous memory regions. 613 * 614 * These request entries should come first in the firmware's resource table, 615 * as other firmware entries might request placing other data objects inside 616 * these memory regions (e.g. data/code segments, trace resource entries, ...). 617 * 618 * Allocating memory this way helps utilizing the reserved physical memory 619 * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries 620 * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB 621 * pressure is important; it may have a substantial impact on performance. 
622 */ 623 static int rproc_handle_carveout(struct rproc *rproc, 624 struct fw_rsc_carveout *rsc, int avail) 625 { 626 struct rproc_mem_entry *carveout, *mapping; 627 struct device *dev = rproc->dev; 628 dma_addr_t dma; 629 void *va; 630 int ret; 631 632 if (sizeof(*rsc) > avail) { 633 dev_err(rproc->dev, "carveout rsc is truncated\n"); 634 return -EINVAL; 635 } 636 637 /* make sure reserved bytes are zeroes */ 638 if (rsc->reserved) { 639 dev_err(dev, "carveout rsc has non zero reserved bytes\n"); 640 return -EINVAL; 641 } 642 643 dev_dbg(dev, "carveout rsc: da %x, pa %x, len %x, flags %x\n", 644 rsc->da, rsc->pa, rsc->len, rsc->flags); 645 646 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); 647 if (!mapping) { 648 dev_err(dev, "kzalloc mapping failed\n"); 649 return -ENOMEM; 650 } 651 652 carveout = kzalloc(sizeof(*carveout), GFP_KERNEL); 653 if (!carveout) { 654 dev_err(dev, "kzalloc carveout failed\n"); 655 ret = -ENOMEM; 656 goto free_mapping; 657 } 658 659 va = dma_alloc_coherent(dev, rsc->len, &dma, GFP_KERNEL); 660 if (!va) { 661 dev_err(dev, "failed to dma alloc carveout: %d\n", rsc->len); 662 ret = -ENOMEM; 663 goto free_carv; 664 } 665 666 dev_dbg(dev, "carveout va %p, dma %x, len 0x%x\n", va, dma, rsc->len); 667 668 /* 669 * Ok, this is non-standard. 670 * 671 * Sometimes we can't rely on the generic iommu-based DMA API 672 * to dynamically allocate the device address and then set the IOMMU 673 * tables accordingly, because some remote processors might 674 * _require_ us to use hard coded device addresses that their 675 * firmware was compiled with. 676 * 677 * In this case, we must use the IOMMU API directly and map 678 * the memory to the device address as expected by the remote 679 * processor. 680 * 681 * Obviously such remote processor devices should not be configured 682 * to use the iommu-based DMA API: we expect 'dma' to contain the 683 * physical address in this case. 
684 */ 685 if (rproc->domain) { 686 ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len, 687 rsc->flags); 688 if (ret) { 689 dev_err(dev, "iommu_map failed: %d\n", ret); 690 goto dma_free; 691 } 692 693 /* 694 * We'll need this info later when we'll want to unmap 695 * everything (e.g. on shutdown). 696 * 697 * We can't trust the remote processor not to change the 698 * resource table, so we must maintain this info independently. 699 */ 700 mapping->da = rsc->da; 701 mapping->len = rsc->len; 702 list_add_tail(&mapping->node, &rproc->mappings); 703 704 dev_dbg(dev, "carveout mapped 0x%x to 0x%x\n", rsc->da, dma); 705 706 /* 707 * Some remote processors might need to know the pa 708 * even though they are behind an IOMMU. E.g., OMAP4's 709 * remote M3 processor needs this so it can control 710 * on-chip hardware accelerators that are not behind 711 * the IOMMU, and therefor must know the pa. 712 * 713 * Generally we don't want to expose physical addresses 714 * if we don't have to (remote processors are generally 715 * _not_ trusted), so we might want to do this only for 716 * remote processor that _must_ have this (e.g. OMAP4's 717 * dual M3 subsystem). 718 */ 719 rsc->pa = dma; 720 } 721 722 carveout->va = va; 723 carveout->len = rsc->len; 724 carveout->dma = dma; 725 carveout->da = rsc->da; 726 727 list_add_tail(&carveout->node, &rproc->carveouts); 728 729 return 0; 730 731 dma_free: 732 dma_free_coherent(dev, rsc->len, va, dma); 733 free_carv: 734 kfree(carveout); 735 free_mapping: 736 kfree(mapping); 737 return ret; 738 } 739 740 /* 741 * A lookup table for resource handlers. The indices are defined in 742 * enum fw_resource_type. 
743 */ 744 static rproc_handle_resource_t rproc_handle_rsc[] = { 745 [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout, 746 [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem, 747 [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace, 748 [RSC_VDEV] = NULL, /* VDEVs were handled upon registrarion */ 749 }; 750 751 /* handle firmware resource entries before booting the remote processor */ 752 static int 753 rproc_handle_boot_rsc(struct rproc *rproc, struct resource_table *table, int len) 754 { 755 struct device *dev = rproc->dev; 756 rproc_handle_resource_t handler; 757 int ret = 0, i; 758 759 for (i = 0; i < table->num; i++) { 760 int offset = table->offset[i]; 761 struct fw_rsc_hdr *hdr = (void *)table + offset; 762 int avail = len - offset - sizeof(*hdr); 763 void *rsc = (void *)hdr + sizeof(*hdr); 764 765 /* make sure table isn't truncated */ 766 if (avail < 0) { 767 dev_err(dev, "rsc table is truncated\n"); 768 return -EINVAL; 769 } 770 771 dev_dbg(dev, "rsc: type %d\n", hdr->type); 772 773 if (hdr->type >= RSC_LAST) { 774 dev_warn(dev, "unsupported resource %d\n", hdr->type); 775 continue; 776 } 777 778 handler = rproc_handle_rsc[hdr->type]; 779 if (!handler) 780 continue; 781 782 ret = handler(rproc, rsc, avail); 783 if (ret) 784 break; 785 } 786 787 return ret; 788 } 789 790 /* handle firmware resource entries while registering the remote processor */ 791 static int 792 rproc_handle_virtio_rsc(struct rproc *rproc, struct resource_table *table, int len) 793 { 794 struct device *dev = rproc->dev; 795 int ret = 0, i; 796 797 for (i = 0; i < table->num; i++) { 798 int offset = table->offset[i]; 799 struct fw_rsc_hdr *hdr = (void *)table + offset; 800 int avail = len - offset - sizeof(*hdr); 801 struct fw_rsc_vdev *vrsc; 802 803 /* make sure table isn't truncated */ 804 if (avail < 0) { 805 dev_err(dev, "rsc table is truncated\n"); 806 return -EINVAL; 807 } 808 809 dev_dbg(dev, "%s: rsc type %d\n", __func__, hdr->type); 810 811 if 
(hdr->type != RSC_VDEV) 812 continue; 813 814 vrsc = (struct fw_rsc_vdev *)hdr->data; 815 816 ret = rproc_handle_vdev(rproc, vrsc, avail); 817 if (ret) 818 break; 819 } 820 821 return ret; 822 } 823 824 /** 825 * rproc_find_rsc_table() - find the resource table 826 * @rproc: the rproc handle 827 * @elf_data: the content of the ELF firmware image 828 * @len: firmware size (in bytes) 829 * @tablesz: place holder for providing back the table size 830 * 831 * This function finds the resource table inside the remote processor's 832 * firmware. It is used both upon the registration of @rproc (in order 833 * to look for and register the supported virito devices), and when the 834 * @rproc is booted. 835 * 836 * Returns the pointer to the resource table if it is found, and write its 837 * size into @tablesz. If a valid table isn't found, NULL is returned 838 * (and @tablesz isn't set). 839 */ 840 static struct resource_table * 841 rproc_find_rsc_table(struct rproc *rproc, const u8 *elf_data, size_t len, 842 int *tablesz) 843 { 844 struct elf32_hdr *ehdr; 845 struct elf32_shdr *shdr; 846 const char *name_table; 847 struct device *dev = rproc->dev; 848 struct resource_table *table = NULL; 849 int i; 850 851 ehdr = (struct elf32_hdr *)elf_data; 852 shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff); 853 name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset; 854 855 /* look for the resource table and handle it */ 856 for (i = 0; i < ehdr->e_shnum; i++, shdr++) { 857 int size = shdr->sh_size; 858 int offset = shdr->sh_offset; 859 860 if (strcmp(name_table + shdr->sh_name, ".resource_table")) 861 continue; 862 863 table = (struct resource_table *)(elf_data + offset); 864 865 /* make sure we have the entire table */ 866 if (offset + size > len) { 867 dev_err(dev, "resource table truncated\n"); 868 return NULL; 869 } 870 871 /* make sure table has at least the header */ 872 if (sizeof(struct resource_table) > size) { 873 dev_err(dev, "header-less resource table\n"); 874 
return NULL; 875 } 876 877 /* we don't support any version beyond the first */ 878 if (table->ver != 1) { 879 dev_err(dev, "unsupported fw ver: %d\n", table->ver); 880 return NULL; 881 } 882 883 /* make sure reserved bytes are zeroes */ 884 if (table->reserved[0] || table->reserved[1]) { 885 dev_err(dev, "non zero reserved bytes\n"); 886 return NULL; 887 } 888 889 /* make sure the offsets array isn't truncated */ 890 if (table->num * sizeof(table->offset[0]) + 891 sizeof(struct resource_table) > size) { 892 dev_err(dev, "resource table incomplete\n"); 893 return NULL; 894 } 895 896 *tablesz = shdr->sh_size; 897 break; 898 } 899 900 return table; 901 } 902 903 /** 904 * rproc_resource_cleanup() - clean up and free all acquired resources 905 * @rproc: rproc handle 906 * 907 * This function will free all resources acquired for @rproc, and it 908 * is called whenever @rproc either shuts down or fails to boot. 909 */ 910 static void rproc_resource_cleanup(struct rproc *rproc) 911 { 912 struct rproc_mem_entry *entry, *tmp; 913 struct device *dev = rproc->dev; 914 915 /* clean up debugfs trace entries */ 916 list_for_each_entry_safe(entry, tmp, &rproc->traces, node) { 917 rproc_remove_trace_file(entry->priv); 918 rproc->num_traces--; 919 list_del(&entry->node); 920 kfree(entry); 921 } 922 923 /* clean up carveout allocations */ 924 list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) { 925 dma_free_coherent(dev, entry->len, entry->va, entry->dma); 926 list_del(&entry->node); 927 kfree(entry); 928 } 929 930 /* clean up iommu mapping entries */ 931 list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) { 932 size_t unmapped; 933 934 unmapped = iommu_unmap(rproc->domain, entry->da, entry->len); 935 if (unmapped != entry->len) { 936 /* nothing much to do besides complaining */ 937 dev_err(dev, "failed to unmap %u/%u\n", entry->len, 938 unmapped); 939 } 940 941 list_del(&entry->node); 942 kfree(entry); 943 } 944 } 945 946 /* make sure this fw image is sane */ 
947 static int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw) 948 { 949 const char *name = rproc->firmware; 950 struct device *dev = rproc->dev; 951 struct elf32_hdr *ehdr; 952 char class; 953 954 if (!fw) { 955 dev_err(dev, "failed to load %s\n", name); 956 return -EINVAL; 957 } 958 959 if (fw->size < sizeof(struct elf32_hdr)) { 960 dev_err(dev, "Image is too small\n"); 961 return -EINVAL; 962 } 963 964 ehdr = (struct elf32_hdr *)fw->data; 965 966 /* We only support ELF32 at this point */ 967 class = ehdr->e_ident[EI_CLASS]; 968 if (class != ELFCLASS32) { 969 dev_err(dev, "Unsupported class: %d\n", class); 970 return -EINVAL; 971 } 972 973 /* We assume the firmware has the same endianess as the host */ 974 # ifdef __LITTLE_ENDIAN 975 if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) { 976 # else /* BIG ENDIAN */ 977 if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) { 978 # endif 979 dev_err(dev, "Unsupported firmware endianess\n"); 980 return -EINVAL; 981 } 982 983 if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) { 984 dev_err(dev, "Image is too small\n"); 985 return -EINVAL; 986 } 987 988 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) { 989 dev_err(dev, "Image is corrupted (bad magic)\n"); 990 return -EINVAL; 991 } 992 993 if (ehdr->e_phnum == 0) { 994 dev_err(dev, "No loadable segments\n"); 995 return -EINVAL; 996 } 997 998 if (ehdr->e_phoff > fw->size) { 999 dev_err(dev, "Firmware size is too small\n"); 1000 return -EINVAL; 1001 } 1002 1003 return 0; 1004 } 1005 1006 /* 1007 * take a firmware and boot a remote processor with it. 
1008 */ 1009 static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) 1010 { 1011 struct device *dev = rproc->dev; 1012 const char *name = rproc->firmware; 1013 struct elf32_hdr *ehdr; 1014 struct resource_table *table; 1015 int ret, tablesz; 1016 1017 ret = rproc_fw_sanity_check(rproc, fw); 1018 if (ret) 1019 return ret; 1020 1021 ehdr = (struct elf32_hdr *)fw->data; 1022 1023 dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size); 1024 1025 /* 1026 * if enabling an IOMMU isn't relevant for this rproc, this is 1027 * just a nop 1028 */ 1029 ret = rproc_enable_iommu(rproc); 1030 if (ret) { 1031 dev_err(dev, "can't enable iommu: %d\n", ret); 1032 return ret; 1033 } 1034 1035 /* 1036 * The ELF entry point is the rproc's boot addr (though this is not 1037 * a configurable property of all remote processors: some will always 1038 * boot at a specific hardcoded address). 1039 */ 1040 rproc->bootaddr = ehdr->e_entry; 1041 1042 /* look for the resource table */ 1043 table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz); 1044 if (!table) 1045 goto clean_up; 1046 1047 /* handle fw resources which are required to boot rproc */ 1048 ret = rproc_handle_boot_rsc(rproc, table, tablesz); 1049 if (ret) { 1050 dev_err(dev, "Failed to process resources: %d\n", ret); 1051 goto clean_up; 1052 } 1053 1054 /* load the ELF segments to memory */ 1055 ret = rproc_load_segments(rproc, fw->data, fw->size); 1056 if (ret) { 1057 dev_err(dev, "Failed to load program segments: %d\n", ret); 1058 goto clean_up; 1059 } 1060 1061 /* power up the remote processor */ 1062 ret = rproc->ops->start(rproc); 1063 if (ret) { 1064 dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret); 1065 goto clean_up; 1066 } 1067 1068 rproc->state = RPROC_RUNNING; 1069 1070 dev_info(dev, "remote processor %s is now up\n", rproc->name); 1071 1072 return 0; 1073 1074 clean_up: 1075 rproc_resource_cleanup(rproc); 1076 rproc_disable_iommu(rproc); 1077 return ret; 1078 } 1079 1080 /* 
1081 * take a firmware and look for virtio devices to register. 1082 * 1083 * Note: this function is called asynchronously upon registration of the 1084 * remote processor (so we must wait until it completes before we try 1085 * to unregister the device. one other option is just to use kref here, 1086 * that might be cleaner). 1087 */ 1088 static void rproc_fw_config_virtio(const struct firmware *fw, void *context) 1089 { 1090 struct rproc *rproc = context; 1091 struct resource_table *table; 1092 int ret, tablesz; 1093 1094 if (rproc_fw_sanity_check(rproc, fw) < 0) 1095 goto out; 1096 1097 /* look for the resource table */ 1098 table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz); 1099 if (!table) 1100 goto out; 1101 1102 /* look for virtio devices and register them */ 1103 ret = rproc_handle_virtio_rsc(rproc, table, tablesz); 1104 if (ret) 1105 goto out; 1106 1107 out: 1108 if (fw) 1109 release_firmware(fw); 1110 /* allow rproc_unregister() contexts, if any, to proceed */ 1111 complete_all(&rproc->firmware_loading_complete); 1112 } 1113 1114 /** 1115 * rproc_boot() - boot a remote processor 1116 * @rproc: handle of a remote processor 1117 * 1118 * Boot a remote processor (i.e. load its firmware, power it on, ...). 1119 * 1120 * If the remote processor is already powered on, this function immediately 1121 * returns (successfully). 1122 * 1123 * Returns 0 on success, and an appropriate error value otherwise. 
 */
int rproc_boot(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = rproc->dev;

	/* interruptible: a pending signal aborts the boot attempt */
	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	/* loading a firmware is required */
	if (!rproc->firmware) {
		dev_err(dev, "%s: no firmware to load\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/*
	 * prevent underlying implementation from being removed; this
	 * reference is dropped below on failure, or by rproc_shutdown()
	 * once the user powers the rproc back down
	 */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/*
	 * skip the boot process if rproc is already powered up; every
	 * successful rproc_boot() call still holds one power ref and one
	 * module ref, each balanced by a matching rproc_shutdown()
	 */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

	dev_info(dev, "powering up %s\n", rproc->name);

	/* load firmware (synchronously; we hold rproc->lock meanwhile) */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto downref_rproc;
	}

	ret = rproc_fw_boot(rproc, firmware_p);

	/* the firmware image is only needed during boot itself */
	release_firmware(firmware_p);

downref_rproc:
	/* undo the power/module refs taken above if boot failed */
	if (ret) {
		module_put(dev->driver->owner);
		atomic_dec(&rproc->power);
	}
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_boot);

/**
 * rproc_shutdown() - power off the remote processor
 * @rproc: the remote processor
 *
 * Power off a remote processor (previously booted with rproc_boot()).
 *
 * In case @rproc is still being used by an additional user(s), then
 * this function will just decrement the power refcount and exit,
 * without really powering off the device.
 *
 * Every call to rproc_boot() must (eventually) be accompanied by a call
 * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
 *
 * Notes:
 * - we're not decrementing the rproc's refcount, only the power refcount.
 * which means that the @rproc handle stays valid even after rproc_shutdown()
 * returns, and users can still use it with a subsequent rproc_boot(), if
 * needed.
 * - don't call rproc_shutdown() to unroll rproc_get_by_name(), exactly
 * because rproc_shutdown() _does not_ decrement the refcount of @rproc.
 * To decrement the refcount of @rproc, use rproc_put() (but _only_ if
 * you acquired @rproc using rproc_get_by_name()).
 */
void rproc_shutdown(struct rproc *rproc)
{
	struct device *dev = rproc->dev;
	int ret;

	/*
	 * NOTE(review): taking the lock interruptibly means a pending signal
	 * makes this shutdown silently do nothing (the power refcount stays
	 * elevated), and callers can't observe that since we return void.
	 */
	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power))
		goto out;

	/* power off the remote processor */
	ret = rproc->ops->stop(rproc);
	if (ret) {
		/* stop failed: restore the power refcount we just dropped */
		atomic_inc(&rproc->power);
		dev_err(dev, "can't stop rproc: %d\n", ret);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	rproc_disable_iommu(rproc);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

out:
	mutex_unlock(&rproc->lock);
	/*
	 * ret is 0 on every path except a failed ops->stop(): drop the module
	 * reference taken by rproc_boot() whenever this call consumed a power
	 * ref (including the "still needed" early exit above).
	 */
	if (!ret)
		module_put(dev->driver->owner);
}
EXPORT_SYMBOL(rproc_shutdown);

/**
 * rproc_release() - completely deletes the existence of a remote processor
 * @kref: the rproc's kref
 *
 * This function should _never_ be called directly.
 *
 * The only reasonable location to use it is as an argument when kref_put'ing
 * @rproc's refcount.
1258 * 1259 * This way it will be called when no one holds a valid pointer to this @rproc 1260 * anymore (and obviously after it is removed from the rprocs klist). 1261 * 1262 * Note: this function is not static because rproc_vdev_release() needs it when 1263 * it decrements @rproc's refcount. 1264 */ 1265 void rproc_release(struct kref *kref) 1266 { 1267 struct rproc *rproc = container_of(kref, struct rproc, refcount); 1268 struct rproc_vdev *rvdev, *rvtmp; 1269 1270 dev_info(rproc->dev, "removing %s\n", rproc->name); 1271 1272 rproc_delete_debug_dir(rproc); 1273 1274 /* clean up remote vdev entries */ 1275 list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) { 1276 __rproc_free_vrings(rvdev, RVDEV_NUM_VRINGS); 1277 list_del(&rvdev->node); 1278 } 1279 1280 /* 1281 * At this point no one holds a reference to rproc anymore, 1282 * so we can directly unroll rproc_alloc() 1283 */ 1284 rproc_free(rproc); 1285 } 1286 1287 /* will be called when an rproc is added to the rprocs klist */ 1288 static void klist_rproc_get(struct klist_node *n) 1289 { 1290 struct rproc *rproc = container_of(n, struct rproc, node); 1291 1292 kref_get(&rproc->refcount); 1293 } 1294 1295 /* will be called when an rproc is removed from the rprocs klist */ 1296 static void klist_rproc_put(struct klist_node *n) 1297 { 1298 struct rproc *rproc = container_of(n, struct rproc, node); 1299 1300 kref_put(&rproc->refcount, rproc_release); 1301 } 1302 1303 static struct rproc *next_rproc(struct klist_iter *i) 1304 { 1305 struct klist_node *n; 1306 1307 n = klist_next(i); 1308 if (!n) 1309 return NULL; 1310 1311 return container_of(n, struct rproc, node); 1312 } 1313 1314 /** 1315 * rproc_get_by_name() - find a remote processor by name and boot it 1316 * @name: name of the remote processor 1317 * 1318 * Finds an rproc handle using the remote processor's name, and then 1319 * boot it. If it's already powered on, then just immediately return 1320 * (successfully). 
1321 * 1322 * Returns the rproc handle on success, and NULL on failure. 1323 * 1324 * This function increments the remote processor's refcount, so always 1325 * use rproc_put() to decrement it back once rproc isn't needed anymore. 1326 * 1327 * Note: currently this function (and its counterpart rproc_put()) are not 1328 * being used. We need to scrutinize the use cases 1329 * that still need them, and see if we can migrate them to use the non 1330 * name-based boot/shutdown interface. 1331 */ 1332 struct rproc *rproc_get_by_name(const char *name) 1333 { 1334 struct rproc *rproc; 1335 struct klist_iter i; 1336 int ret; 1337 1338 /* find the remote processor, and upref its refcount */ 1339 klist_iter_init(&rprocs, &i); 1340 while ((rproc = next_rproc(&i)) != NULL) 1341 if (!strcmp(rproc->name, name)) { 1342 kref_get(&rproc->refcount); 1343 break; 1344 } 1345 klist_iter_exit(&i); 1346 1347 /* can't find this rproc ? */ 1348 if (!rproc) { 1349 pr_err("can't find remote processor %s\n", name); 1350 return NULL; 1351 } 1352 1353 ret = rproc_boot(rproc); 1354 if (ret < 0) { 1355 kref_put(&rproc->refcount, rproc_release); 1356 return NULL; 1357 } 1358 1359 return rproc; 1360 } 1361 EXPORT_SYMBOL(rproc_get_by_name); 1362 1363 /** 1364 * rproc_put() - decrement the refcount of a remote processor, and shut it down 1365 * @rproc: the remote processor 1366 * 1367 * This function tries to shutdown @rproc, and it then decrements its 1368 * refcount. 1369 * 1370 * After this function returns, @rproc may _not_ be used anymore, and its 1371 * handle should be considered invalid. 1372 * 1373 * This function should be called _iff_ the @rproc handle was grabbed by 1374 * calling rproc_get_by_name(). 
 */
void rproc_put(struct rproc *rproc)
{
	/* try to power off the remote processor (power refcount permitting) */
	rproc_shutdown(rproc);

	/* downref rproc's refcount; may free @rproc via rproc_release() */
	kref_put(&rproc->refcount, rproc_release);
}
EXPORT_SYMBOL(rproc_put);

/**
 * rproc_register() - register a remote processor
 * @rproc: the remote processor handle to register
 *
 * Registers @rproc with the remoteproc framework, after it has been
 * allocated with rproc_alloc().
 *
 * This is called by the platform-specific rproc implementation, whenever
 * a new remote processor device is probed.
 *
 * Returns 0 on success and an appropriate error code otherwise.
 *
 * Note: this function initiates an asynchronous firmware loading
 * context, which will look for virtio devices supported by the rproc's
 * firmware.
 *
 * If found, those virtio devices will be created and added, so as a result
 * of registering this remote processor, additional virtio drivers might be
 * probed.
 */
int rproc_register(struct rproc *rproc)
{
	struct device *dev = rproc->dev;
	int ret = 0;

	/* expose to rproc_get_by_name users (takes a klist-side kref) */
	klist_add_tail(&rproc->node, &rprocs);

	dev_info(rproc->dev, "%s is available\n", rproc->name);

	dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
	dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n");

	/* create debugfs entries */
	rproc_create_debug_dir(rproc);

	/*
	 * rproc_unregister() calls must wait until async loader completes;
	 * this must be initialized before the loader can possibly run.
	 */
	init_completion(&rproc->firmware_loading_complete);

	/*
	 * We must retrieve early virtio configuration info from
	 * the firmware (e.g. whether to register a virtio device,
	 * what virtio features does it support, ...).
	 *
	 * We're initiating an asynchronous firmware loading, so we can
	 * be built-in kernel code, without hanging the boot process.
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					rproc->firmware, dev, GFP_KERNEL,
					rproc, rproc_fw_config_virtio);
	if (ret < 0) {
		dev_err(dev, "request_firmware_nowait failed: %d\n", ret);
		/* no loader will run: unblock any rproc_unregister() waiter */
		complete_all(&rproc->firmware_loading_complete);
		/* undo the klist exposure (drops the klist-side kref) */
		klist_remove(&rproc->node);
	}

	return ret;
}
EXPORT_SYMBOL(rproc_register);

/**
 * rproc_alloc() - allocate a remote processor handle
 * @dev: the underlying device
 * @name: name of this remote processor
 * @ops: platform-specific handlers (mainly start/stop)
 * @firmware: name of firmware file to load
 * @len: length of private data needed by the rproc driver (in bytes)
 *
 * Allocates a new remote processor handle, but does not register
 * it yet.
 *
 * This function should be used by rproc implementations during initialization
 * of the remote processor.
 *
 * After creating an rproc handle using this function, and when ready,
 * implementations should then call rproc_register() to complete
 * the registration of the remote processor.
 *
 * On success the new rproc is returned, and on failure, NULL.
 *
 * Note: _never_ directly deallocate @rproc, even if it was not registered
 * yet. Instead, if you just need to unroll rproc_alloc(), use rproc_free().
1468 */ 1469 struct rproc *rproc_alloc(struct device *dev, const char *name, 1470 const struct rproc_ops *ops, 1471 const char *firmware, int len) 1472 { 1473 struct rproc *rproc; 1474 1475 if (!dev || !name || !ops) 1476 return NULL; 1477 1478 rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL); 1479 if (!rproc) { 1480 dev_err(dev, "%s: kzalloc failed\n", __func__); 1481 return NULL; 1482 } 1483 1484 rproc->dev = dev; 1485 rproc->name = name; 1486 rproc->ops = ops; 1487 rproc->firmware = firmware; 1488 rproc->priv = &rproc[1]; 1489 1490 atomic_set(&rproc->power, 0); 1491 1492 kref_init(&rproc->refcount); 1493 1494 mutex_init(&rproc->lock); 1495 1496 idr_init(&rproc->notifyids); 1497 1498 INIT_LIST_HEAD(&rproc->carveouts); 1499 INIT_LIST_HEAD(&rproc->mappings); 1500 INIT_LIST_HEAD(&rproc->traces); 1501 INIT_LIST_HEAD(&rproc->rvdevs); 1502 1503 rproc->state = RPROC_OFFLINE; 1504 1505 return rproc; 1506 } 1507 EXPORT_SYMBOL(rproc_alloc); 1508 1509 /** 1510 * rproc_free() - free an rproc handle that was allocated by rproc_alloc 1511 * @rproc: the remote processor handle 1512 * 1513 * This function should _only_ be used if @rproc was only allocated, 1514 * but not registered yet. 1515 * 1516 * If @rproc was already successfully registered (by calling rproc_register()), 1517 * then use rproc_unregister() instead. 1518 */ 1519 void rproc_free(struct rproc *rproc) 1520 { 1521 idr_remove_all(&rproc->notifyids); 1522 idr_destroy(&rproc->notifyids); 1523 1524 kfree(rproc); 1525 } 1526 EXPORT_SYMBOL(rproc_free); 1527 1528 /** 1529 * rproc_unregister() - unregister a remote processor 1530 * @rproc: rproc handle to unregister 1531 * 1532 * Unregisters a remote processor, and decrements its refcount. 1533 * If its refcount drops to zero, then @rproc will be freed. If not, 1534 * it will be freed later once the last reference is dropped. 1535 * 1536 * This function should be called when the platform specific rproc 1537 * implementation decides to remove the rproc device. 
it should 1538 * _only_ be called if a previous invocation of rproc_register() 1539 * has completed successfully. 1540 * 1541 * After rproc_unregister() returns, @rproc is _not_ valid anymore and 1542 * it shouldn't be used. More specifically, don't call rproc_free() 1543 * or try to directly free @rproc after rproc_unregister() returns; 1544 * none of these are needed, and calling them is a bug. 1545 * 1546 * Returns 0 on success and -EINVAL if @rproc isn't valid. 1547 */ 1548 int rproc_unregister(struct rproc *rproc) 1549 { 1550 struct rproc_vdev *rvdev; 1551 1552 if (!rproc) 1553 return -EINVAL; 1554 1555 /* if rproc is just being registered, wait */ 1556 wait_for_completion(&rproc->firmware_loading_complete); 1557 1558 /* clean up remote vdev entries */ 1559 list_for_each_entry(rvdev, &rproc->rvdevs, node) 1560 rproc_remove_virtio_dev(rvdev); 1561 1562 /* the rproc is downref'ed as soon as it's removed from the klist */ 1563 klist_del(&rproc->node); 1564 1565 /* the rproc will only be released after its refcount drops to zero */ 1566 kref_put(&rproc->refcount, rproc_release); 1567 1568 return 0; 1569 } 1570 EXPORT_SYMBOL(rproc_unregister); 1571 1572 static int __init remoteproc_init(void) 1573 { 1574 rproc_init_debugfs(); 1575 return 0; 1576 } 1577 module_init(remoteproc_init); 1578 1579 static void __exit remoteproc_exit(void) 1580 { 1581 rproc_exit_debugfs(); 1582 } 1583 module_exit(remoteproc_exit); 1584 1585 MODULE_LICENSE("GPL v2"); 1586 MODULE_DESCRIPTION("Generic Remote Processor Framework"); 1587