1 /* 2 * Remote Processor Framework 3 * 4 * Copyright (C) 2011 Texas Instruments, Inc. 5 * Copyright (C) 2011 Google, Inc. 6 * 7 * Ohad Ben-Cohen <ohad@wizery.com> 8 * Brian Swetland <swetland@google.com> 9 * Mark Grosen <mgrosen@ti.com> 10 * Fernando Guzman Lugo <fernando.lugo@ti.com> 11 * Suman Anna <s-anna@ti.com> 12 * Robert Tivy <rtivy@ti.com> 13 * Armando Uribe De Leon <x0095078@ti.com> 14 * 15 * This program is free software; you can redistribute it and/or 16 * modify it under the terms of the GNU General Public License 17 * version 2 as published by the Free Software Foundation. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 */ 24 25 #define pr_fmt(fmt) "%s: " fmt, __func__ 26 27 #include <linux/kernel.h> 28 #include <linux/module.h> 29 #include <linux/device.h> 30 #include <linux/slab.h> 31 #include <linux/mutex.h> 32 #include <linux/dma-mapping.h> 33 #include <linux/firmware.h> 34 #include <linux/string.h> 35 #include <linux/debugfs.h> 36 #include <linux/remoteproc.h> 37 #include <linux/iommu.h> 38 #include <linux/klist.h> 39 #include <linux/elf.h> 40 #include <linux/virtio_ids.h> 41 #include <linux/virtio_ring.h> 42 #include <asm/byteorder.h> 43 44 #include "remoteproc_internal.h" 45 46 static void klist_rproc_get(struct klist_node *n); 47 static void klist_rproc_put(struct klist_node *n); 48 49 /* 50 * klist of the available remote processors. 51 * 52 * We need this in order to support name-based lookups (needed by the 53 * rproc_get_by_name()). 54 * 55 * That said, we don't use rproc_get_by_name() at this point. 56 * The use cases that do require its existence should be 57 * scrutinized, and hopefully migrated to rproc_boot() using device-based 58 * binding. 
59 * 60 * If/when this materializes, we could drop the klist (and the by_name 61 * API). 62 */ 63 static DEFINE_KLIST(rprocs, klist_rproc_get, klist_rproc_put); 64 65 typedef int (*rproc_handle_resources_t)(struct rproc *rproc, 66 struct resource_table *table, int len); 67 typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail); 68 69 /* 70 * This is the IOMMU fault handler we register with the IOMMU API 71 * (when relevant; not all remote processors access memory through 72 * an IOMMU). 73 * 74 * IOMMU core will invoke this handler whenever the remote processor 75 * will try to access an unmapped device address. 76 * 77 * Currently this is mostly a stub, but it will be later used to trigger 78 * the recovery of the remote processor. 79 */ 80 static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev, 81 unsigned long iova, int flags, void *token) 82 { 83 dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags); 84 85 /* 86 * Let the iommu core know we're not really handling this fault; 87 * we just plan to use this as a recovery trigger. 88 */ 89 return -ENOSYS; 90 } 91 92 static int rproc_enable_iommu(struct rproc *rproc) 93 { 94 struct iommu_domain *domain; 95 struct device *dev = rproc->dev; 96 int ret; 97 98 /* 99 * We currently use iommu_present() to decide if an IOMMU 100 * setup is needed. 101 * 102 * This works for simple cases, but will easily fail with 103 * platforms that do have an IOMMU, but not for this specific 104 * rproc. 105 * 106 * This will be easily solved by introducing hw capabilities 107 * that will be set by the remoteproc driver. 
108 */ 109 if (!iommu_present(dev->bus)) { 110 dev_dbg(dev, "iommu not found\n"); 111 return 0; 112 } 113 114 domain = iommu_domain_alloc(dev->bus); 115 if (!domain) { 116 dev_err(dev, "can't alloc iommu domain\n"); 117 return -ENOMEM; 118 } 119 120 iommu_set_fault_handler(domain, rproc_iommu_fault, rproc); 121 122 ret = iommu_attach_device(domain, dev); 123 if (ret) { 124 dev_err(dev, "can't attach iommu device: %d\n", ret); 125 goto free_domain; 126 } 127 128 rproc->domain = domain; 129 130 return 0; 131 132 free_domain: 133 iommu_domain_free(domain); 134 return ret; 135 } 136 137 static void rproc_disable_iommu(struct rproc *rproc) 138 { 139 struct iommu_domain *domain = rproc->domain; 140 struct device *dev = rproc->dev; 141 142 if (!domain) 143 return; 144 145 iommu_detach_device(domain, dev); 146 iommu_domain_free(domain); 147 148 return; 149 } 150 151 /* 152 * Some remote processors will ask us to allocate them physically contiguous 153 * memory regions (which we call "carveouts"), and map them to specific 154 * device addresses (which are hardcoded in the firmware). 155 * 156 * They may then ask us to copy objects into specific device addresses (e.g. 157 * code/data sections) or expose us certain symbols in other device address 158 * (e.g. their trace buffer). 159 * 160 * This function is an internal helper with which we can go over the allocated 161 * carveouts and translate specific device address to kernel virtual addresses 162 * so we can access the referenced memory. 163 * 164 * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too, 165 * but only on kernel direct mapped RAM memory. Instead, we're just using 166 * here the output of the DMA API, which should be more correct. 
167 */ 168 static void *rproc_da_to_va(struct rproc *rproc, u64 da, int len) 169 { 170 struct rproc_mem_entry *carveout; 171 void *ptr = NULL; 172 173 list_for_each_entry(carveout, &rproc->carveouts, node) { 174 int offset = da - carveout->da; 175 176 /* try next carveout if da is too small */ 177 if (offset < 0) 178 continue; 179 180 /* try next carveout if da is too large */ 181 if (offset + len > carveout->len) 182 continue; 183 184 ptr = carveout->va + offset; 185 186 break; 187 } 188 189 return ptr; 190 } 191 192 /** 193 * rproc_load_segments() - load firmware segments to memory 194 * @rproc: remote processor which will be booted using these fw segments 195 * @elf_data: the content of the ELF firmware image 196 * @len: firmware size (in bytes) 197 * 198 * This function loads the firmware segments to memory, where the remote 199 * processor expects them. 200 * 201 * Some remote processors will expect their code and data to be placed 202 * in specific device addresses, and can't have them dynamically assigned. 203 * 204 * We currently support only those kind of remote processors, and expect 205 * the program header's paddr member to contain those addresses. We then go 206 * through the physically contiguous "carveout" memory regions which we 207 * allocated (and mapped) earlier on behalf of the remote processor, 208 * and "translate" device address to kernel addresses, so we can copy the 209 * segments where they are expected. 210 * 211 * Currently we only support remote processors that required carveout 212 * allocations and got them mapped onto their iommus. Some processors 213 * might be different: they might not have iommus, and would prefer to 214 * directly allocate memory for every segment/resource. This is not yet 215 * supported, though. 
216 */ 217 static int 218 rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len) 219 { 220 struct device *dev = rproc->dev; 221 struct elf32_hdr *ehdr; 222 struct elf32_phdr *phdr; 223 int i, ret = 0; 224 225 ehdr = (struct elf32_hdr *)elf_data; 226 phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff); 227 228 /* go through the available ELF segments */ 229 for (i = 0; i < ehdr->e_phnum; i++, phdr++) { 230 u32 da = phdr->p_paddr; 231 u32 memsz = phdr->p_memsz; 232 u32 filesz = phdr->p_filesz; 233 u32 offset = phdr->p_offset; 234 void *ptr; 235 236 if (phdr->p_type != PT_LOAD) 237 continue; 238 239 dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n", 240 phdr->p_type, da, memsz, filesz); 241 242 if (filesz > memsz) { 243 dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n", 244 filesz, memsz); 245 ret = -EINVAL; 246 break; 247 } 248 249 if (offset + filesz > len) { 250 dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n", 251 offset + filesz, len); 252 ret = -EINVAL; 253 break; 254 } 255 256 /* grab the kernel address for this device address */ 257 ptr = rproc_da_to_va(rproc, da, memsz); 258 if (!ptr) { 259 dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz); 260 ret = -EINVAL; 261 break; 262 } 263 264 /* put the segment where the remote processor expects it */ 265 if (phdr->p_filesz) 266 memcpy(ptr, elf_data + phdr->p_offset, filesz); 267 268 /* 269 * Zero out remaining memory for this segment. 270 * 271 * This isn't strictly required since dma_alloc_coherent already 272 * did this for us. albeit harmless, we may consider removing 273 * this. 
274 */ 275 if (memsz > filesz) 276 memset(ptr + filesz, 0, memsz - filesz); 277 } 278 279 return ret; 280 } 281 282 static int 283 __rproc_handle_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i) 284 { 285 struct rproc *rproc = rvdev->rproc; 286 struct device *dev = rproc->dev; 287 struct fw_rsc_vdev_vring *vring = &rsc->vring[i]; 288 dma_addr_t dma; 289 void *va; 290 int ret, size, notifyid; 291 292 dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n", 293 i, vring->da, vring->num, vring->align); 294 295 /* make sure reserved bytes are zeroes */ 296 if (vring->reserved) { 297 dev_err(dev, "vring rsc has non zero reserved bytes\n"); 298 return -EINVAL; 299 } 300 301 /* verify queue size and vring alignment are sane */ 302 if (!vring->num || !vring->align) { 303 dev_err(dev, "invalid qsz (%d) or alignment (%d)\n", 304 vring->num, vring->align); 305 return -EINVAL; 306 } 307 308 /* actual size of vring (in bytes) */ 309 size = PAGE_ALIGN(vring_size(vring->num, vring->align)); 310 311 if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) { 312 dev_err(dev, "idr_pre_get failed\n"); 313 return -ENOMEM; 314 } 315 316 /* 317 * Allocate non-cacheable memory for the vring. 
In the future 318 * this call will also configure the IOMMU for us 319 */ 320 va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL); 321 if (!va) { 322 dev_err(dev, "dma_alloc_coherent failed\n"); 323 return -EINVAL; 324 } 325 326 /* assign an rproc-wide unique index for this vring */ 327 /* TODO: assign a notifyid for rvdev updates as well */ 328 ret = idr_get_new(&rproc->notifyids, &rvdev->vring[i], ¬ifyid); 329 if (ret) { 330 dev_err(dev, "idr_get_new failed: %d\n", ret); 331 dma_free_coherent(dev, size, va, dma); 332 return ret; 333 } 334 335 /* let the rproc know the da and notifyid of this vring */ 336 /* TODO: expose this to remote processor */ 337 vring->da = dma; 338 vring->notifyid = notifyid; 339 340 dev_dbg(dev, "vring%d: va %p dma %x size %x idr %d\n", i, va, 341 dma, size, notifyid); 342 343 rvdev->vring[i].len = vring->num; 344 rvdev->vring[i].align = vring->align; 345 rvdev->vring[i].va = va; 346 rvdev->vring[i].dma = dma; 347 rvdev->vring[i].notifyid = notifyid; 348 rvdev->vring[i].rvdev = rvdev; 349 350 return 0; 351 } 352 353 static void __rproc_free_vrings(struct rproc_vdev *rvdev, int i) 354 { 355 struct rproc *rproc = rvdev->rproc; 356 357 for (i--; i >= 0; i--) { 358 struct rproc_vring *rvring = &rvdev->vring[i]; 359 int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align)); 360 361 dma_free_coherent(rproc->dev, size, rvring->va, rvring->dma); 362 idr_remove(&rproc->notifyids, rvring->notifyid); 363 } 364 } 365 366 /** 367 * rproc_handle_vdev() - handle a vdev fw resource 368 * @rproc: the remote processor 369 * @rsc: the vring resource descriptor 370 * @avail: size of available data (for sanity checking the image) 371 * 372 * This resource entry requests the host to statically register a virtio 373 * device (vdev), and setup everything needed to support it. It contains 374 * everything needed to make it possible: the virtio device id, virtio 375 * device features, vrings information, virtio config space, etc... 
376 * 377 * Before registering the vdev, the vrings are allocated from non-cacheable 378 * physically contiguous memory. Currently we only support two vrings per 379 * remote processor (temporary limitation). We might also want to consider 380 * doing the vring allocation only later when ->find_vqs() is invoked, and 381 * then release them upon ->del_vqs(). 382 * 383 * Note: @da is currently not really handled correctly: we dynamically 384 * allocate it using the DMA API, ignoring requested hard coded addresses, 385 * and we don't take care of any required IOMMU programming. This is all 386 * going to be taken care of when the generic iommu-based DMA API will be 387 * merged. Meanwhile, statically-addressed iommu-based firmware images should 388 * use RSC_DEVMEM resource entries to map their required @da to the physical 389 * address of their base CMA region (ouch, hacky!). 390 * 391 * Returns 0 on success, or an appropriate error code otherwise 392 */ 393 static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, 394 int avail) 395 { 396 struct device *dev = rproc->dev; 397 struct rproc_vdev *rvdev; 398 int i, ret; 399 400 /* make sure resource isn't truncated */ 401 if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) 402 + rsc->config_len > avail) { 403 dev_err(rproc->dev, "vdev rsc is truncated\n"); 404 return -EINVAL; 405 } 406 407 /* make sure reserved bytes are zeroes */ 408 if (rsc->reserved[0] || rsc->reserved[1]) { 409 dev_err(dev, "vdev rsc has non zero reserved bytes\n"); 410 return -EINVAL; 411 } 412 413 dev_dbg(dev, "vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n", 414 rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings); 415 416 /* we currently support only two vrings per rvdev */ 417 if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) { 418 dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings); 419 return -EINVAL; 420 } 421 422 rvdev = kzalloc(sizeof(struct rproc_vdev), GFP_KERNEL); 423 if 
(!rvdev) 424 return -ENOMEM; 425 426 rvdev->rproc = rproc; 427 428 /* allocate the vrings */ 429 for (i = 0; i < rsc->num_of_vrings; i++) { 430 ret = __rproc_handle_vring(rvdev, rsc, i); 431 if (ret) 432 goto free_vrings; 433 } 434 435 /* remember the device features */ 436 rvdev->dfeatures = rsc->dfeatures; 437 438 list_add_tail(&rvdev->node, &rproc->rvdevs); 439 440 /* it is now safe to add the virtio device */ 441 ret = rproc_add_virtio_dev(rvdev, rsc->id); 442 if (ret) 443 goto free_vrings; 444 445 return 0; 446 447 free_vrings: 448 __rproc_free_vrings(rvdev, i); 449 kfree(rvdev); 450 return ret; 451 } 452 453 /** 454 * rproc_handle_trace() - handle a shared trace buffer resource 455 * @rproc: the remote processor 456 * @rsc: the trace resource descriptor 457 * @avail: size of available data (for sanity checking the image) 458 * 459 * In case the remote processor dumps trace logs into memory, 460 * export it via debugfs. 461 * 462 * Currently, the 'da' member of @rsc should contain the device address 463 * where the remote processor is dumping the traces. Later we could also 464 * support dynamically allocating this address using the generic 465 * DMA API (but currently there isn't a use case for that). 466 * 467 * Returns 0 on success, or an appropriate error code otherwise 468 */ 469 static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, 470 int avail) 471 { 472 struct rproc_mem_entry *trace; 473 struct device *dev = rproc->dev; 474 void *ptr; 475 char name[15]; 476 477 if (sizeof(*rsc) > avail) { 478 dev_err(rproc->dev, "trace rsc is truncated\n"); 479 return -EINVAL; 480 } 481 482 /* make sure reserved bytes are zeroes */ 483 if (rsc->reserved) { 484 dev_err(dev, "trace rsc has non zero reserved bytes\n"); 485 return -EINVAL; 486 } 487 488 /* what's the kernel address of this resource ? 
*/ 489 ptr = rproc_da_to_va(rproc, rsc->da, rsc->len); 490 if (!ptr) { 491 dev_err(dev, "erroneous trace resource entry\n"); 492 return -EINVAL; 493 } 494 495 trace = kzalloc(sizeof(*trace), GFP_KERNEL); 496 if (!trace) { 497 dev_err(dev, "kzalloc trace failed\n"); 498 return -ENOMEM; 499 } 500 501 /* set the trace buffer dma properties */ 502 trace->len = rsc->len; 503 trace->va = ptr; 504 505 /* make sure snprintf always null terminates, even if truncating */ 506 snprintf(name, sizeof(name), "trace%d", rproc->num_traces); 507 508 /* create the debugfs entry */ 509 trace->priv = rproc_create_trace_file(name, rproc, trace); 510 if (!trace->priv) { 511 trace->va = NULL; 512 kfree(trace); 513 return -EINVAL; 514 } 515 516 list_add_tail(&trace->node, &rproc->traces); 517 518 rproc->num_traces++; 519 520 dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n", name, ptr, 521 rsc->da, rsc->len); 522 523 return 0; 524 } 525 526 /** 527 * rproc_handle_devmem() - handle devmem resource entry 528 * @rproc: remote processor handle 529 * @rsc: the devmem resource entry 530 * @avail: size of available data (for sanity checking the image) 531 * 532 * Remote processors commonly need to access certain on-chip peripherals. 533 * 534 * Some of these remote processors access memory via an iommu device, 535 * and might require us to configure their iommu before they can access 536 * the on-chip peripherals they need. 537 * 538 * This resource entry is a request to map such a peripheral device. 539 * 540 * These devmem entries will contain the physical address of the device in 541 * the 'pa' member. If a specific device address is expected, then 'da' will 542 * contain it (currently this is the only use case supported). 'len' will 543 * contain the size of the physical region we need to map. 
544 * 545 * Currently we just "trust" those devmem entries to contain valid physical 546 * addresses, but this is going to change: we want the implementations to 547 * tell us ranges of physical addresses the firmware is allowed to request, 548 * and not allow firmwares to request access to physical addresses that 549 * are outside those ranges. 550 */ 551 static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc, 552 int avail) 553 { 554 struct rproc_mem_entry *mapping; 555 int ret; 556 557 /* no point in handling this resource without a valid iommu domain */ 558 if (!rproc->domain) 559 return -EINVAL; 560 561 if (sizeof(*rsc) > avail) { 562 dev_err(rproc->dev, "devmem rsc is truncated\n"); 563 return -EINVAL; 564 } 565 566 /* make sure reserved bytes are zeroes */ 567 if (rsc->reserved) { 568 dev_err(rproc->dev, "devmem rsc has non zero reserved bytes\n"); 569 return -EINVAL; 570 } 571 572 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); 573 if (!mapping) { 574 dev_err(rproc->dev, "kzalloc mapping failed\n"); 575 return -ENOMEM; 576 } 577 578 ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags); 579 if (ret) { 580 dev_err(rproc->dev, "failed to map devmem: %d\n", ret); 581 goto out; 582 } 583 584 /* 585 * We'll need this info later when we'll want to unmap everything 586 * (e.g. on shutdown). 587 * 588 * We can't trust the remote processor not to change the resource 589 * table, so we must maintain this info independently. 
590 */ 591 mapping->da = rsc->da; 592 mapping->len = rsc->len; 593 list_add_tail(&mapping->node, &rproc->mappings); 594 595 dev_dbg(rproc->dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n", 596 rsc->pa, rsc->da, rsc->len); 597 598 return 0; 599 600 out: 601 kfree(mapping); 602 return ret; 603 } 604 605 /** 606 * rproc_handle_carveout() - handle phys contig memory allocation requests 607 * @rproc: rproc handle 608 * @rsc: the resource entry 609 * @avail: size of available data (for image validation) 610 * 611 * This function will handle firmware requests for allocation of physically 612 * contiguous memory regions. 613 * 614 * These request entries should come first in the firmware's resource table, 615 * as other firmware entries might request placing other data objects inside 616 * these memory regions (e.g. data/code segments, trace resource entries, ...). 617 * 618 * Allocating memory this way helps utilizing the reserved physical memory 619 * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries 620 * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB 621 * pressure is important; it may have a substantial impact on performance. 
622 */ 623 static int rproc_handle_carveout(struct rproc *rproc, 624 struct fw_rsc_carveout *rsc, int avail) 625 { 626 struct rproc_mem_entry *carveout, *mapping; 627 struct device *dev = rproc->dev; 628 dma_addr_t dma; 629 void *va; 630 int ret; 631 632 if (sizeof(*rsc) > avail) { 633 dev_err(rproc->dev, "carveout rsc is truncated\n"); 634 return -EINVAL; 635 } 636 637 /* make sure reserved bytes are zeroes */ 638 if (rsc->reserved) { 639 dev_err(dev, "carveout rsc has non zero reserved bytes\n"); 640 return -EINVAL; 641 } 642 643 dev_dbg(dev, "carveout rsc: da %x, pa %x, len %x, flags %x\n", 644 rsc->da, rsc->pa, rsc->len, rsc->flags); 645 646 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); 647 if (!mapping) { 648 dev_err(dev, "kzalloc mapping failed\n"); 649 return -ENOMEM; 650 } 651 652 carveout = kzalloc(sizeof(*carveout), GFP_KERNEL); 653 if (!carveout) { 654 dev_err(dev, "kzalloc carveout failed\n"); 655 ret = -ENOMEM; 656 goto free_mapping; 657 } 658 659 va = dma_alloc_coherent(dev, rsc->len, &dma, GFP_KERNEL); 660 if (!va) { 661 dev_err(dev, "failed to dma alloc carveout: %d\n", rsc->len); 662 ret = -ENOMEM; 663 goto free_carv; 664 } 665 666 dev_dbg(dev, "carveout va %p, dma %x, len 0x%x\n", va, dma, rsc->len); 667 668 /* 669 * Ok, this is non-standard. 670 * 671 * Sometimes we can't rely on the generic iommu-based DMA API 672 * to dynamically allocate the device address and then set the IOMMU 673 * tables accordingly, because some remote processors might 674 * _require_ us to use hard coded device addresses that their 675 * firmware was compiled with. 676 * 677 * In this case, we must use the IOMMU API directly and map 678 * the memory to the device address as expected by the remote 679 * processor. 680 * 681 * Obviously such remote processor devices should not be configured 682 * to use the iommu-based DMA API: we expect 'dma' to contain the 683 * physical address in this case. 
684 */ 685 if (rproc->domain) { 686 ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len, 687 rsc->flags); 688 if (ret) { 689 dev_err(dev, "iommu_map failed: %d\n", ret); 690 goto dma_free; 691 } 692 693 /* 694 * We'll need this info later when we'll want to unmap 695 * everything (e.g. on shutdown). 696 * 697 * We can't trust the remote processor not to change the 698 * resource table, so we must maintain this info independently. 699 */ 700 mapping->da = rsc->da; 701 mapping->len = rsc->len; 702 list_add_tail(&mapping->node, &rproc->mappings); 703 704 dev_dbg(dev, "carveout mapped 0x%x to 0x%x\n", rsc->da, dma); 705 706 /* 707 * Some remote processors might need to know the pa 708 * even though they are behind an IOMMU. E.g., OMAP4's 709 * remote M3 processor needs this so it can control 710 * on-chip hardware accelerators that are not behind 711 * the IOMMU, and therefor must know the pa. 712 * 713 * Generally we don't want to expose physical addresses 714 * if we don't have to (remote processors are generally 715 * _not_ trusted), so we might want to do this only for 716 * remote processor that _must_ have this (e.g. OMAP4's 717 * dual M3 subsystem). 718 */ 719 rsc->pa = dma; 720 } 721 722 carveout->va = va; 723 carveout->len = rsc->len; 724 carveout->dma = dma; 725 carveout->da = rsc->da; 726 727 list_add_tail(&carveout->node, &rproc->carveouts); 728 729 return 0; 730 731 dma_free: 732 dma_free_coherent(dev, rsc->len, va, dma); 733 free_carv: 734 kfree(carveout); 735 free_mapping: 736 kfree(mapping); 737 return ret; 738 } 739 740 /* 741 * A lookup table for resource handlers. The indices are defined in 742 * enum fw_resource_type. 
743 */ 744 static rproc_handle_resource_t rproc_handle_rsc[] = { 745 [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout, 746 [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem, 747 [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace, 748 [RSC_VDEV] = NULL, /* VDEVs were handled upon registrarion */ 749 }; 750 751 /* handle firmware resource entries before booting the remote processor */ 752 static int 753 rproc_handle_boot_rsc(struct rproc *rproc, struct resource_table *table, int len) 754 { 755 struct device *dev = rproc->dev; 756 rproc_handle_resource_t handler; 757 int ret = 0, i; 758 759 for (i = 0; i < table->num; i++) { 760 int offset = table->offset[i]; 761 struct fw_rsc_hdr *hdr = (void *)table + offset; 762 int avail = len - offset - sizeof(*hdr); 763 void *rsc = (void *)hdr + sizeof(*hdr); 764 765 /* make sure table isn't truncated */ 766 if (avail < 0) { 767 dev_err(dev, "rsc table is truncated\n"); 768 return -EINVAL; 769 } 770 771 dev_dbg(dev, "rsc: type %d\n", hdr->type); 772 773 if (hdr->type >= RSC_LAST) { 774 dev_warn(dev, "unsupported resource %d\n", hdr->type); 775 continue; 776 } 777 778 handler = rproc_handle_rsc[hdr->type]; 779 if (!handler) 780 continue; 781 782 ret = handler(rproc, rsc, avail); 783 if (ret) 784 break; 785 } 786 787 return ret; 788 } 789 790 /* handle firmware resource entries while registering the remote processor */ 791 static int 792 rproc_handle_virtio_rsc(struct rproc *rproc, struct resource_table *table, int len) 793 { 794 struct device *dev = rproc->dev; 795 int ret = 0, i; 796 797 for (i = 0; i < table->num; i++) { 798 int offset = table->offset[i]; 799 struct fw_rsc_hdr *hdr = (void *)table + offset; 800 int avail = len - offset - sizeof(*hdr); 801 struct fw_rsc_vdev *vrsc; 802 803 /* make sure table isn't truncated */ 804 if (avail < 0) { 805 dev_err(dev, "rsc table is truncated\n"); 806 return -EINVAL; 807 } 808 809 dev_dbg(dev, "%s: rsc type %d\n", __func__, hdr->type); 810 811 if 
(hdr->type != RSC_VDEV) 812 continue; 813 814 vrsc = (struct fw_rsc_vdev *)hdr->data; 815 816 ret = rproc_handle_vdev(rproc, vrsc, avail); 817 if (ret) 818 break; 819 } 820 821 return ret; 822 } 823 824 /** 825 * rproc_find_rsc_table() - find the resource table 826 * @rproc: the rproc handle 827 * @elf_data: the content of the ELF firmware image 828 * @len: firmware size (in bytes) 829 * @tablesz: place holder for providing back the table size 830 * 831 * This function finds the resource table inside the remote processor's 832 * firmware. It is used both upon the registration of @rproc (in order 833 * to look for and register the supported virito devices), and when the 834 * @rproc is booted. 835 * 836 * Returns the pointer to the resource table if it is found, and write its 837 * size into @tablesz. If a valid table isn't found, NULL is returned 838 * (and @tablesz isn't set). 839 */ 840 static struct resource_table * 841 rproc_find_rsc_table(struct rproc *rproc, const u8 *elf_data, size_t len, 842 int *tablesz) 843 { 844 struct elf32_hdr *ehdr; 845 struct elf32_shdr *shdr; 846 const char *name_table; 847 struct device *dev = rproc->dev; 848 struct resource_table *table = NULL; 849 int i; 850 851 ehdr = (struct elf32_hdr *)elf_data; 852 shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff); 853 name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset; 854 855 /* look for the resource table and handle it */ 856 for (i = 0; i < ehdr->e_shnum; i++, shdr++) { 857 int size = shdr->sh_size; 858 int offset = shdr->sh_offset; 859 860 if (strcmp(name_table + shdr->sh_name, ".resource_table")) 861 continue; 862 863 table = (struct resource_table *)(elf_data + offset); 864 865 /* make sure we have the entire table */ 866 if (offset + size > len) { 867 dev_err(dev, "resource table truncated\n"); 868 return NULL; 869 } 870 871 /* make sure table has at least the header */ 872 if (sizeof(struct resource_table) > size) { 873 dev_err(dev, "header-less resource table\n"); 874 
return NULL; 875 } 876 877 /* we don't support any version beyond the first */ 878 if (table->ver != 1) { 879 dev_err(dev, "unsupported fw ver: %d\n", table->ver); 880 return NULL; 881 } 882 883 /* make sure reserved bytes are zeroes */ 884 if (table->reserved[0] || table->reserved[1]) { 885 dev_err(dev, "non zero reserved bytes\n"); 886 return NULL; 887 } 888 889 /* make sure the offsets array isn't truncated */ 890 if (table->num * sizeof(table->offset[0]) + 891 sizeof(struct resource_table) > size) { 892 dev_err(dev, "resource table incomplete\n"); 893 return NULL; 894 } 895 896 *tablesz = shdr->sh_size; 897 break; 898 } 899 900 return table; 901 } 902 903 /** 904 * rproc_resource_cleanup() - clean up and free all acquired resources 905 * @rproc: rproc handle 906 * 907 * This function will free all resources acquired for @rproc, and it 908 * is called whenever @rproc either shuts down or fails to boot. 909 */ 910 static void rproc_resource_cleanup(struct rproc *rproc) 911 { 912 struct rproc_mem_entry *entry, *tmp; 913 struct device *dev = rproc->dev; 914 915 /* clean up debugfs trace entries */ 916 list_for_each_entry_safe(entry, tmp, &rproc->traces, node) { 917 rproc_remove_trace_file(entry->priv); 918 rproc->num_traces--; 919 list_del(&entry->node); 920 kfree(entry); 921 } 922 923 /* clean up carveout allocations */ 924 list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) { 925 dma_free_coherent(dev, entry->len, entry->va, entry->dma); 926 list_del(&entry->node); 927 kfree(entry); 928 } 929 930 /* clean up iommu mapping entries */ 931 list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) { 932 size_t unmapped; 933 934 unmapped = iommu_unmap(rproc->domain, entry->da, entry->len); 935 if (unmapped != entry->len) { 936 /* nothing much to do besides complaining */ 937 dev_err(dev, "failed to unmap %u/%zu\n", entry->len, 938 unmapped); 939 } 940 941 list_del(&entry->node); 942 kfree(entry); 943 } 944 } 945 946 /* make sure this fw image is sane */ 
947 static int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw) 948 { 949 const char *name = rproc->firmware; 950 struct device *dev = rproc->dev; 951 struct elf32_hdr *ehdr; 952 char class; 953 954 if (!fw) { 955 dev_err(dev, "failed to load %s\n", name); 956 return -EINVAL; 957 } 958 959 if (fw->size < sizeof(struct elf32_hdr)) { 960 dev_err(dev, "Image is too small\n"); 961 return -EINVAL; 962 } 963 964 ehdr = (struct elf32_hdr *)fw->data; 965 966 /* We only support ELF32 at this point */ 967 class = ehdr->e_ident[EI_CLASS]; 968 if (class != ELFCLASS32) { 969 dev_err(dev, "Unsupported class: %d\n", class); 970 return -EINVAL; 971 } 972 973 /* We assume the firmware has the same endianess as the host */ 974 # ifdef __LITTLE_ENDIAN 975 if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) { 976 # else /* BIG ENDIAN */ 977 if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) { 978 # endif 979 dev_err(dev, "Unsupported firmware endianess\n"); 980 return -EINVAL; 981 } 982 983 if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) { 984 dev_err(dev, "Image is too small\n"); 985 return -EINVAL; 986 } 987 988 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) { 989 dev_err(dev, "Image is corrupted (bad magic)\n"); 990 return -EINVAL; 991 } 992 993 if (ehdr->e_phnum == 0) { 994 dev_err(dev, "No loadable segments\n"); 995 return -EINVAL; 996 } 997 998 if (ehdr->e_phoff > fw->size) { 999 dev_err(dev, "Firmware size is too small\n"); 1000 return -EINVAL; 1001 } 1002 1003 return 0; 1004 } 1005 1006 /* 1007 * take a firmware and boot a remote processor with it. 
 */
static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = rproc->dev;
	const char *name = rproc->firmware;
	struct elf32_hdr *ehdr;
	struct resource_table *table;
	int ret, tablesz;

	/* validate the image before touching any of its fields */
	ret = rproc_fw_sanity_check(rproc, fw);
	if (ret)
		return ret;

	ehdr = (struct elf32_hdr *)fw->data;

	dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);

	/*
	 * if enabling an IOMMU isn't relevant for this rproc, this is
	 * just a nop
	 */
	ret = rproc_enable_iommu(rproc);
	if (ret) {
		dev_err(dev, "can't enable iommu: %d\n", ret);
		return ret;
	}

	/*
	 * The ELF entry point is the rproc's boot addr (though this is not
	 * a configurable property of all remote processors: some will always
	 * boot at a specific hardcoded address).
	 */
	rproc->bootaddr = ehdr->e_entry;

	/* look for the resource table */
	table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
	if (!table) {
		ret = -EINVAL;
		goto clean_up;
	}

	/* handle fw resources which are required to boot rproc */
	ret = rproc_handle_boot_rsc(rproc, table, tablesz);
	if (ret) {
		dev_err(dev, "Failed to process resources: %d\n", ret);
		goto clean_up;
	}

	/* load the ELF segments to memory */
	ret = rproc_load_segments(rproc, fw->data, fw->size);
	if (ret) {
		dev_err(dev, "Failed to load program segments: %d\n", ret);
		goto clean_up;
	}

	/* power up the remote processor */
	ret = rproc->ops->start(rproc);
	if (ret) {
		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
		goto clean_up;
	}

	rproc->state = RPROC_RUNNING;

	dev_info(dev, "remote processor %s is now up\n", rproc->name);

	return 0;

	/* on failure, unwind everything acquired above, in reverse order */
clean_up:
	rproc_resource_cleanup(rproc);
	rproc_disable_iommu(rproc);
	return ret;
}

/*
 * take a firmware and look for virtio devices to register.
 *
 * Note: this function is called asynchronously upon registration of the
 * remote processor (so we must wait until it completes before we try
 * to unregister the device. one other option is just to use kref here,
 * that might be cleaner).
 */
static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
{
	struct rproc *rproc = context;
	struct resource_table *table;
	int ret, tablesz;

	/* all failures below are silent: this is a best-effort scan */
	if (rproc_fw_sanity_check(rproc, fw) < 0)
		goto out;

	/* look for the resource table */
	table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
	if (!table)
		goto out;

	/* look for virtio devices and register them */
	ret = rproc_handle_virtio_rsc(rproc, table, tablesz);
	if (ret)
		goto out;

out:
	release_firmware(fw);
	/* allow rproc_unregister() contexts, if any, to proceed */
	complete_all(&rproc->firmware_loading_complete);
}

/**
 * rproc_boot() - boot a remote processor
 * @rproc: handle of a remote processor
 *
 * Boot a remote processor (i.e. load its firmware, power it on, ...).
 *
 * If the remote processor is already powered on, this function immediately
 * returns (successfully).
 *
 * Returns 0 on success, and an appropriate error value otherwise.
 */
int rproc_boot(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = rproc->dev;

	/* serialize against rproc_shutdown() and concurrent boot attempts */
	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	/* loading a firmware is required */
	if (!rproc->firmware) {
		dev_err(dev, "%s: no firmware to load\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/*
	 * prevent underlying implementation from being removed;
	 * every successful boot holds one module ref, dropped again
	 * by the matching rproc_shutdown() (or below on failure).
	 */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/*
	 * skip the boot process if rproc is already powered up;
	 * the power refcount (not the kref) tracks boot/shutdown nesting
	 */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

	dev_info(dev, "powering up %s\n", rproc->name);

	/* load firmware */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto downref_rproc;
	}

	ret = rproc_fw_boot(rproc, firmware_p);

	/* the firmware buffer is no longer needed once booted (or failed) */
	release_firmware(firmware_p);

downref_rproc:
	/* on failure, undo the module ref and power count taken above */
	if (ret) {
		module_put(dev->driver->owner);
		atomic_dec(&rproc->power);
	}
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_boot);

/**
 * rproc_shutdown() - power off the remote processor
 * @rproc: the remote processor
 *
 * Power off a remote processor (previously booted with rproc_boot()).
 *
 * In case @rproc is still being used by an additional user(s), then
 * this function will just decrement the power refcount and exit,
 * without really powering off the device.
 *
 * Every call to rproc_boot() must (eventually) be accompanied by a call
 * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
 *
 * Notes:
 * - we're not decrementing the rproc's refcount, only the power refcount.
 * which means that the @rproc handle stays valid even after rproc_shutdown()
 * returns, and users can still use it with a subsequent rproc_boot(), if
 * needed.
 * - don't call rproc_shutdown() to unroll rproc_get_by_name(), exactly
 * because rproc_shutdown() _does not_ decrement the refcount of @rproc.
 * To decrement the refcount of @rproc, use rproc_put() (but _only_ if
 * you acquired @rproc using rproc_get_by_name()).
 */
void rproc_shutdown(struct rproc *rproc)
{
	struct device *dev = rproc->dev;
	int ret;

	/* serialize against rproc_boot() and concurrent shutdowns */
	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power))
		goto out;

	/* power off the remote processor */
	ret = rproc->ops->stop(rproc);
	if (ret) {
		/* stop failed: restore the power count we just dropped */
		atomic_inc(&rproc->power);
		dev_err(dev, "can't stop rproc: %d\n", ret);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	rproc_disable_iommu(rproc);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

out:
	mutex_unlock(&rproc->lock);
	/*
	 * drop the module ref taken by the matching rproc_boot();
	 * skipped only when ops->stop() failed above (ret != 0)
	 */
	if (!ret)
		module_put(dev->driver->owner);
}
EXPORT_SYMBOL(rproc_shutdown);

/**
 * rproc_release() - completely deletes the existence of a remote processor
 * @kref: the rproc's kref
 *
 * This function should _never_ be called directly.
 *
 * The only reasonable location to use it is as an argument when kref_put'ing
 * @rproc's refcount.
1259 * 1260 * This way it will be called when no one holds a valid pointer to this @rproc 1261 * anymore (and obviously after it is removed from the rprocs klist). 1262 * 1263 * Note: this function is not static because rproc_vdev_release() needs it when 1264 * it decrements @rproc's refcount. 1265 */ 1266 void rproc_release(struct kref *kref) 1267 { 1268 struct rproc *rproc = container_of(kref, struct rproc, refcount); 1269 struct rproc_vdev *rvdev, *rvtmp; 1270 1271 dev_info(rproc->dev, "removing %s\n", rproc->name); 1272 1273 rproc_delete_debug_dir(rproc); 1274 1275 /* clean up remote vdev entries */ 1276 list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) { 1277 __rproc_free_vrings(rvdev, RVDEV_NUM_VRINGS); 1278 list_del(&rvdev->node); 1279 } 1280 1281 /* 1282 * At this point no one holds a reference to rproc anymore, 1283 * so we can directly unroll rproc_alloc() 1284 */ 1285 rproc_free(rproc); 1286 } 1287 1288 /* will be called when an rproc is added to the rprocs klist */ 1289 static void klist_rproc_get(struct klist_node *n) 1290 { 1291 struct rproc *rproc = container_of(n, struct rproc, node); 1292 1293 kref_get(&rproc->refcount); 1294 } 1295 1296 /* will be called when an rproc is removed from the rprocs klist */ 1297 static void klist_rproc_put(struct klist_node *n) 1298 { 1299 struct rproc *rproc = container_of(n, struct rproc, node); 1300 1301 kref_put(&rproc->refcount, rproc_release); 1302 } 1303 1304 static struct rproc *next_rproc(struct klist_iter *i) 1305 { 1306 struct klist_node *n; 1307 1308 n = klist_next(i); 1309 if (!n) 1310 return NULL; 1311 1312 return container_of(n, struct rproc, node); 1313 } 1314 1315 /** 1316 * rproc_get_by_name() - find a remote processor by name and boot it 1317 * @name: name of the remote processor 1318 * 1319 * Finds an rproc handle using the remote processor's name, and then 1320 * boot it. If it's already powered on, then just immediately return 1321 * (successfully). 
1322 * 1323 * Returns the rproc handle on success, and NULL on failure. 1324 * 1325 * This function increments the remote processor's refcount, so always 1326 * use rproc_put() to decrement it back once rproc isn't needed anymore. 1327 * 1328 * Note: currently this function (and its counterpart rproc_put()) are not 1329 * being used. We need to scrutinize the use cases 1330 * that still need them, and see if we can migrate them to use the non 1331 * name-based boot/shutdown interface. 1332 */ 1333 struct rproc *rproc_get_by_name(const char *name) 1334 { 1335 struct rproc *rproc; 1336 struct klist_iter i; 1337 int ret; 1338 1339 /* find the remote processor, and upref its refcount */ 1340 klist_iter_init(&rprocs, &i); 1341 while ((rproc = next_rproc(&i)) != NULL) 1342 if (!strcmp(rproc->name, name)) { 1343 kref_get(&rproc->refcount); 1344 break; 1345 } 1346 klist_iter_exit(&i); 1347 1348 /* can't find this rproc ? */ 1349 if (!rproc) { 1350 pr_err("can't find remote processor %s\n", name); 1351 return NULL; 1352 } 1353 1354 ret = rproc_boot(rproc); 1355 if (ret < 0) { 1356 kref_put(&rproc->refcount, rproc_release); 1357 return NULL; 1358 } 1359 1360 return rproc; 1361 } 1362 EXPORT_SYMBOL(rproc_get_by_name); 1363 1364 /** 1365 * rproc_put() - decrement the refcount of a remote processor, and shut it down 1366 * @rproc: the remote processor 1367 * 1368 * This function tries to shutdown @rproc, and it then decrements its 1369 * refcount. 1370 * 1371 * After this function returns, @rproc may _not_ be used anymore, and its 1372 * handle should be considered invalid. 1373 * 1374 * This function should be called _iff_ the @rproc handle was grabbed by 1375 * calling rproc_get_by_name(). 
1376 */ 1377 void rproc_put(struct rproc *rproc) 1378 { 1379 /* try to power off the remote processor */ 1380 rproc_shutdown(rproc); 1381 1382 /* downref rproc's refcount */ 1383 kref_put(&rproc->refcount, rproc_release); 1384 } 1385 EXPORT_SYMBOL(rproc_put); 1386 1387 /** 1388 * rproc_register() - register a remote processor 1389 * @rproc: the remote processor handle to register 1390 * 1391 * Registers @rproc with the remoteproc framework, after it has been 1392 * allocated with rproc_alloc(). 1393 * 1394 * This is called by the platform-specific rproc implementation, whenever 1395 * a new remote processor device is probed. 1396 * 1397 * Returns 0 on success and an appropriate error code otherwise. 1398 * 1399 * Note: this function initiates an asynchronous firmware loading 1400 * context, which will look for virtio devices supported by the rproc's 1401 * firmware. 1402 * 1403 * If found, those virtio devices will be created and added, so as a result 1404 * of registering this remote processor, additional virtio drivers might be 1405 * probed. 1406 */ 1407 int rproc_register(struct rproc *rproc) 1408 { 1409 struct device *dev = rproc->dev; 1410 int ret = 0; 1411 1412 /* expose to rproc_get_by_name users */ 1413 klist_add_tail(&rproc->node, &rprocs); 1414 1415 dev_info(rproc->dev, "%s is available\n", rproc->name); 1416 1417 dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n"); 1418 dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n"); 1419 1420 /* create debugfs entries */ 1421 rproc_create_debug_dir(rproc); 1422 1423 /* rproc_unregister() calls must wait until async loader completes */ 1424 init_completion(&rproc->firmware_loading_complete); 1425 1426 /* 1427 * We must retrieve early virtio configuration info from 1428 * the firmware (e.g. whether to register a virtio device, 1429 * what virtio features does it support, ...). 
1430 * 1431 * We're initiating an asynchronous firmware loading, so we can 1432 * be built-in kernel code, without hanging the boot process. 1433 */ 1434 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, 1435 rproc->firmware, dev, GFP_KERNEL, 1436 rproc, rproc_fw_config_virtio); 1437 if (ret < 0) { 1438 dev_err(dev, "request_firmware_nowait failed: %d\n", ret); 1439 complete_all(&rproc->firmware_loading_complete); 1440 klist_remove(&rproc->node); 1441 } 1442 1443 return ret; 1444 } 1445 EXPORT_SYMBOL(rproc_register); 1446 1447 /** 1448 * rproc_alloc() - allocate a remote processor handle 1449 * @dev: the underlying device 1450 * @name: name of this remote processor 1451 * @ops: platform-specific handlers (mainly start/stop) 1452 * @firmware: name of firmware file to load 1453 * @len: length of private data needed by the rproc driver (in bytes) 1454 * 1455 * Allocates a new remote processor handle, but does not register 1456 * it yet. 1457 * 1458 * This function should be used by rproc implementations during initialization 1459 * of the remote processor. 1460 * 1461 * After creating an rproc handle using this function, and when ready, 1462 * implementations should then call rproc_register() to complete 1463 * the registration of the remote processor. 1464 * 1465 * On success the new rproc is returned, and on failure, NULL. 1466 * 1467 * Note: _never_ directly deallocate @rproc, even if it was not registered 1468 * yet. Instead, if you just need to unroll rproc_alloc(), use rproc_free(). 
1469 */ 1470 struct rproc *rproc_alloc(struct device *dev, const char *name, 1471 const struct rproc_ops *ops, 1472 const char *firmware, int len) 1473 { 1474 struct rproc *rproc; 1475 1476 if (!dev || !name || !ops) 1477 return NULL; 1478 1479 rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL); 1480 if (!rproc) { 1481 dev_err(dev, "%s: kzalloc failed\n", __func__); 1482 return NULL; 1483 } 1484 1485 rproc->dev = dev; 1486 rproc->name = name; 1487 rproc->ops = ops; 1488 rproc->firmware = firmware; 1489 rproc->priv = &rproc[1]; 1490 1491 atomic_set(&rproc->power, 0); 1492 1493 kref_init(&rproc->refcount); 1494 1495 mutex_init(&rproc->lock); 1496 1497 idr_init(&rproc->notifyids); 1498 1499 INIT_LIST_HEAD(&rproc->carveouts); 1500 INIT_LIST_HEAD(&rproc->mappings); 1501 INIT_LIST_HEAD(&rproc->traces); 1502 INIT_LIST_HEAD(&rproc->rvdevs); 1503 1504 rproc->state = RPROC_OFFLINE; 1505 1506 return rproc; 1507 } 1508 EXPORT_SYMBOL(rproc_alloc); 1509 1510 /** 1511 * rproc_free() - free an rproc handle that was allocated by rproc_alloc 1512 * @rproc: the remote processor handle 1513 * 1514 * This function should _only_ be used if @rproc was only allocated, 1515 * but not registered yet. 1516 * 1517 * If @rproc was already successfully registered (by calling rproc_register()), 1518 * then use rproc_unregister() instead. 1519 */ 1520 void rproc_free(struct rproc *rproc) 1521 { 1522 idr_remove_all(&rproc->notifyids); 1523 idr_destroy(&rproc->notifyids); 1524 1525 kfree(rproc); 1526 } 1527 EXPORT_SYMBOL(rproc_free); 1528 1529 /** 1530 * rproc_unregister() - unregister a remote processor 1531 * @rproc: rproc handle to unregister 1532 * 1533 * Unregisters a remote processor, and decrements its refcount. 1534 * If its refcount drops to zero, then @rproc will be freed. If not, 1535 * it will be freed later once the last reference is dropped. 1536 * 1537 * This function should be called when the platform specific rproc 1538 * implementation decides to remove the rproc device. 
it should 1539 * _only_ be called if a previous invocation of rproc_register() 1540 * has completed successfully. 1541 * 1542 * After rproc_unregister() returns, @rproc is _not_ valid anymore and 1543 * it shouldn't be used. More specifically, don't call rproc_free() 1544 * or try to directly free @rproc after rproc_unregister() returns; 1545 * none of these are needed, and calling them is a bug. 1546 * 1547 * Returns 0 on success and -EINVAL if @rproc isn't valid. 1548 */ 1549 int rproc_unregister(struct rproc *rproc) 1550 { 1551 struct rproc_vdev *rvdev; 1552 1553 if (!rproc) 1554 return -EINVAL; 1555 1556 /* if rproc is just being registered, wait */ 1557 wait_for_completion(&rproc->firmware_loading_complete); 1558 1559 /* clean up remote vdev entries */ 1560 list_for_each_entry(rvdev, &rproc->rvdevs, node) 1561 rproc_remove_virtio_dev(rvdev); 1562 1563 /* the rproc is downref'ed as soon as it's removed from the klist */ 1564 klist_del(&rproc->node); 1565 1566 /* the rproc will only be released after its refcount drops to zero */ 1567 kref_put(&rproc->refcount, rproc_release); 1568 1569 return 0; 1570 } 1571 EXPORT_SYMBOL(rproc_unregister); 1572 1573 static int __init remoteproc_init(void) 1574 { 1575 rproc_init_debugfs(); 1576 return 0; 1577 } 1578 module_init(remoteproc_init); 1579 1580 static void __exit remoteproc_exit(void) 1581 { 1582 rproc_exit_debugfs(); 1583 } 1584 module_exit(remoteproc_exit); 1585 1586 MODULE_LICENSE("GPL v2"); 1587 MODULE_DESCRIPTION("Generic Remote Processor Framework"); 1588