// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <uapi/misc/fastrpc.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX		4 /* adsp, mdsp, sdsp, cdsp */
#define FASTRPC_MAX_SESSIONS	9 /* 8 compute, 1 cpz */
#define FASTRPC_ALIGN		128
#define FASTRPC_MAX_FDLIST	16
#define FASTRPC_MAX_CRCLIST	64
#define FASTRPC_PHYS(p)		((p) & 0xffffffff)
#define FASTRPC_CTX_MAX		(256)
#define FASTRPC_INIT_HANDLE	1
#define FASTRPC_CTXID_MASK	(0xFF0)
#define INIT_FILELEN_MAX	(64 * 1024 * 1024)
#define INIT_MEMLEN_MAX		(8 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME	"fastrpc"

/* Retrieves the number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)

/* Retrieves the number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc)	(((sc) >> 8) & 0x0ff)

/* Retrieves the number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc)	(((sc) >> 4) & 0x0f)

/* Retrieves the number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc)	((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +	\
					 REMOTE_SCALARS_OUTBUFS(sc) +	\
					 REMOTE_SCALARS_INHANDLES(sc) +	\
					 REMOTE_SCALARS_OUTHANDLES(sc))

#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout)	\
				((((attr) & 0x07) << 29) |	\
				 (((method) & 0x1f) << 24) |	\
				 (((in) & 0xff) << 16) |	\
				 (((out) & 0xff) << 8) |	\
				 (((oin) & 0x0f) << 4) |	\
				 ((oout) & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
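
/*
 * Worked example (illustrative): FASTRPC_SCALARS(6, 4, 0) packs method 6
 * into bits 24-28 and four input buffers into bits 16-23, giving
 * 0x06040000: attr 0, method 6, 4 inbufs, 0 outbufs, no handles.
 * REMOTE_SCALARS_LENGTH(0x06040000) is then 4 + 0 + 0 + 0 = 4.
 */
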
#define FASTRPC_CREATE_PROCESS_NARGS	6
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH	0
#define FASTRPC_RMID_INIT_RELEASE	1
#define FASTRPC_RMID_INIT_CREATE	6
#define FASTRPC_RMID_INIT_CREATE_ATTR	7
#define FASTRPC_RMID_INIT_CREATE_STATIC	8

#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp"};

struct fastrpc_phy_page {
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_buf {
	u32 num;		/* number of contiguous regions */
	u32 pgidx;		/* index to start of contiguous region */
};

struct fastrpc_remote_arg {
	u64 pv;
	u64 len;
};

struct fastrpc_msg {
	int pid;		/* process group id */
	int tid;		/* thread id */
	u64 ctx;		/* invoke caller context */
	u32 handle;		/* handle to invoke */
	u32 sc;			/* scalars structure describing the data */
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_rsp {
	u64 ctx;		/* invoke caller context */
	int retval;		/* invoke return value */
};

struct fastrpc_buf_overlap {
	u64 start;
	u64 end;
	int raix;
	u64 mstart;
	u64 mend;
	u64 offset;
};

struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct dma_buf *dmabuf;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;
	/* Lock for dma buf attachments */
	struct mutex lock;
	struct list_head attachments;
};

struct fastrpc_dma_buf_attachment {
	struct device *dev;
	struct sg_table sgt;
	struct list_head node;
};

struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	struct kref refcount;
};

struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 *crc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node; /* list of ctxs */
	struct completion work;
	struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};

struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct miscdevice miscdev;
};

struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;
	/* Lock for lists */
	spinlock_t lock;
	/* lock for allocations */
	struct mutex mutex;
};
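
/*
 * Object relationships, for orientation: a fastrpc_channel_ctx represents
 * one rpmsg channel (one DSP domain) and owns the session table, the
 * context IDR and the miscdevice. Each open of that miscdevice creates a
 * fastrpc_user, which grabs one fastrpc_session_ctx and keeps per-file
 * lists of dma-buf imports (fastrpc_map, refcounted) and in-flight
 * fastrpc_invoke_ctxs (also refcounted, since the rpmsg callback and the
 * ioctl path can drop them independently).
 */
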
static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		dma_buf_unmap_attachment(map->attach, map->table,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}

static void fastrpc_map_get(struct fastrpc_map *map)
{
	if (map)
		kref_get(&map->refcount);
}

static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
			    struct fastrpc_map **ppmap)
{
	struct fastrpc_map *map = NULL;

	mutex_lock(&fl->mutex);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd == fd) {
			fastrpc_map_get(map);
			*ppmap = map;
			mutex_unlock(&fl->mutex);
			return 0;
		}
	}
	mutex_unlock(&fl->mutex);

	return -ENOENT;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	kfree(buf);
}

static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->attachments);
	mutex_init(&buf->lock);

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		mutex_destroy(&buf->lock);
		kfree(buf);
		return -ENOMEM;
	}

	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	*obuf = buf;

	return 0;
}

static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nscalars; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
	struct fastrpc_invoke_ctx *ctx =
			container_of(work, struct fastrpc_invoke_ctx, put_work);

	fastrpc_context_put(ctx);
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
	struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
	struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);

	return st == 0 ? ed : st;
}
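
/*
 * Walk the input/output buffers and work out how much fresh payload space
 * each one really needs: after sorting by start address (ties broken by the
 * larger end first, see olaps_cmp above), any part of a buffer that falls
 * inside an already covered range is expressed as an offset into the
 * previous allocation. mstart/mend delimit the bytes a buffer adds beyond
 * the running max_end; a buffer wholly contained in an earlier one ends up
 * with mstart = mend = 0 and only an offset.
 */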
static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
		/* Falling inside previous range */
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}

static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	unsigned long flags;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->olaps = kcalloc(ctx->nscalars,
				     sizeof(*ctx->olaps), GFP_KERNEL);
		if (!ctx->olaps) {
			kfree(ctx->maps);
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
		fastrpc_get_buff_overlaps(ctx);
	}

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);
	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

	spin_lock_irqsave(&cctx->lock, flags);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto err_idr;
	}
	ctx->ctxid = ret << 4;
	spin_unlock_irqrestore(&cctx->lock, flags);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	return ERR_PTR(ret);
}

static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;

	table = &a->sgt;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
		return ERR_PTR(-ENOMEM);

	return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *table,
				  enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buffer = dmabuf->priv;

	fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void fastrpc_dma_buf_detach(struct dma_buf *dmabuf,
				   struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	kfree(a);
}

static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL;
}

static void *fastrpc_vmap(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	return buf->virt;
}

static int fastrpc_mmap(struct dma_buf *dmabuf,
			struct vm_area_struct *vma)
{
	struct fastrpc_buf *buf = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return dma_mmap_coherent(buf->dev, vma, buf->virt,
				 FASTRPC_PHYS(buf->phys), size);
}

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
	.attach = fastrpc_dma_buf_attach,
	.detach = fastrpc_dma_buf_detach,
	.map_dma_buf = fastrpc_map_dma_buf,
	.unmap_dma_buf = fastrpc_unmap_dma_buf,
	.mmap = fastrpc_mmap,
	.map = fastrpc_kmap,
	.vmap = fastrpc_vmap,
	.release = fastrpc_release,
};
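
/*
 * Import a caller-supplied dma-buf fd for the session device: reuse an
 * existing map for this fd when there is one, otherwise attach and map the
 * buffer and derive the 64-bit address the DSP will use, with the session
 * ID (the SMMU context bank) encoded in the upper 32 bits.
 */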
static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int err = 0;

	if (!fastrpc_map_find(fl, fd, ppmap))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(map->table)) {
		err = PTR_ERR(map->table);
		goto map_err;
	}

	map->phys = sg_dma_address(map->table->sgl);
	map->phys += ((u64)fl->sctx->sid << 32);
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;
	kref_init(&map->refcount);

	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	kfree(map);

	return err;
}

/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>  START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(struct fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |             (0 - N)             |
 * +---------------------------------+
 */
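
/*
 * Worked size example (illustrative): for one input and one output buffer,
 * nscalars = 2, so the metadata takes
 * 2 * (16 + 8 + 16) + 16 * sizeof(u64) + 64 * sizeof(u32) = 464 bytes
 * (remote args + invoke bufs + page infos, plus the FD and CRC lists),
 * which fastrpc_get_payload_size() rounds up to 512 before the inline
 * arguments start.
 */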
static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_arg) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}

static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
	u64 size = 0;
	int i;

	size = ALIGN(metalen, FASTRPC_ALIGN);
	for (i = 0; i < ctx->nscalars; i++) {
		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

			if (ctx->olaps[i].offset == 0)
				size = ALIGN(size, FASTRPC_ALIGN);

			size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
		}
	}

	return size;
}

static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	int i, err;

	for (i = 0; i < ctx->nscalars; ++i) {
		/* Make sure reserved field is set to 0 */
		if (ctx->args[i].reserved)
			return -EINVAL;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
		    ctx->args[i].length == 0)
			continue;

		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
					 ctx->args[i].length, &ctx->maps[i]);
		if (err) {
			dev_err(dev, "Error creating map %d\n", err);
			return -EINVAL;
		}
	}
	return 0;
}

static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, oix, err = 0;
	u64 len, rlen, pkt_size;
	u64 pg_start, pg_end;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	rpra = ctx->buf->virt;
	list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
	pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
		sizeof(*rpra));
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;

	for (oix = 0; oix < ctx->nbufs; ++oix) {
		int mlen;

		i = ctx->olaps[oix].raix;
		len = ctx->args[i].length;

		rpra[i].pv = 0;
		rpra[i].len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;

		if (ctx->maps[i]) {
			struct vm_area_struct *vma = NULL;

			rpra[i].pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;

			vma = find_vma(current->mm, ctx->args[i].ptr);
			if (vma)
				pages[i].addr += ctx->args[i].ptr -
						 vma->vm_start;

			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
				  PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;

		} else {

			if (ctx->olaps[oix].offset == 0) {
				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
				args = ALIGN(args, FASTRPC_ALIGN);
			}

			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

			if (rlen < mlen) {
				err = -EINVAL;
				goto bail;
			}

			rpra[i].pv = args - ctx->olaps[oix].offset;
			pages[i].addr = ctx->buf->phys -
					ctx->olaps[oix].offset +
					(pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;

			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
			args = args + mlen;
			rlen -= mlen;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		rpra[i].pv = (u64) ctx->args[i].ptr;
		rpra[i].len = ctx->args[i].length;
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		pages[i].addr = ctx->maps[i]->phys;
		pages[i].size = ctx->maps[i]->size;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}

static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	struct fastrpc_remote_arg *rpra = ctx->rpra;
	int i, inbufs;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		void *src = (void *)(uintptr_t)rpra[i].pv;
		void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
		u64 len = rpra[i].len;

		if (!kernel) {
			if (copy_to_user((void __user *)dst, src, len))
				return -EFAULT;
		} else {
			memcpy(dst, src, len);
		}
	}

	return 0;
}

static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, uint32_t handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
}
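
/*
 * Core RPC round trip: build the shared payload (fastrpc_get_args), make it
 * visible with a write barrier, send the fastrpc_msg over rpmsg, sleep on
 * ctx->work until fastrpc_rpmsg_callback() completes it with the DSP's
 * return value, then copy the output buffers back (fastrpc_put_args).
 */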
static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->nscalars) {
		err = fastrpc_get_args(kernel, ctx);
		if (err)
			goto bail;
	}

	/* make sure that all CPU memory writes are seen by DSP */
	dma_wmb();
	/* Send invoke buffer to remote dsp */
	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;

	/* Wait for remote dsp to respond or time out */
	err = wait_for_completion_interruptible(&ctx->work);
	if (err)
		goto bail;

	/* Check the response from remote dsp */
	err = ctx->retval;
	if (err)
		goto bail;

	if (ctx->nscalars) {
		/* make sure that all memory writes by DSP are seen by CPU */
		dma_rmb();
		/* populate all the output buffers with results */
		err = fastrpc_put_args(ctx, kernel);
		if (err)
			goto bail;
	}

bail:
	/* We are done with this compute context, remove it from pending list */
	spin_lock(&fl->lock);
	list_del(&ctx->node);
	spin_unlock(&fl->lock);
	fastrpc_context_put(ctx);

	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}

static int fastrpc_init_create_process(struct fastrpc_user *fl,
				       char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = 1;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
		if (err)
			goto err;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err)
		goto err_alloc;

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t)&inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;

err_invoke:
	fl->init_mem = NULL;
	fastrpc_buf_free(imem);
err_alloc:
	if (map) {
		spin_lock(&fl->lock);
		list_del(&map->node);
		spin_unlock(&fl->lock);
		fastrpc_map_put(map);
	}
err:
	kfree(args);

	return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	session->used = false;
	spin_unlock_irqrestore(&cctx->lock, flags);
}

static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	struct fastrpc_invoke_ctx *ctx, *n;
	struct fastrpc_map *map, *m;
	unsigned long flags;

	fastrpc_release_current_dsp_process(fl);

	spin_lock_irqsave(&cctx->lock, flags);
	list_del(&fl->user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (fl->init_mem)
		fastrpc_buf_free(fl->init_mem);

	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
		list_del(&ctx->node);
		fastrpc_context_put(ctx);
	}

	list_for_each_entry_safe(map, m, &fl->maps, node) {
		list_del(&map->node);
		fastrpc_map_put(map);
	}

	fastrpc_session_free(cctx, fl->sctx);

	mutex_destroy(&fl->mutex);
	kfree(fl);
	file->private_data = NULL;

	return 0;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
	struct fastrpc_user *fl = NULL;
	unsigned long flags;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;

	fl->sctx = fastrpc_session_alloc(cctx);
	if (!fl->sctx) {
		dev_err(&cctx->rpdev->dev, "No session available\n");
		mutex_destroy(&fl->mutex);
		kfree(fl);

		return -EBUSY;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp)
{
	struct dma_buf *buf;
	int info;

	if (copy_from_user(&info, argp, sizeof(info)))
		return -EFAULT;

	buf = dma_buf_get(info);
	if (IS_ERR_OR_NULL(buf))
		return -EINVAL;
	/*
	 * one for the last get and other for the ALLOC_DMA_BUFF ioctl
	 */
	dma_buf_put(buf);
	dma_buf_put(buf);

	return 0;
}

static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_alloc_dma_buf bp;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct fastrpc_buf *buf = NULL;
	int err;

	if (copy_from_user(&bp, argp, sizeof(bp)))
		return -EFAULT;

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
	if (err)
		return err;
	exp_info.ops = &fastrpc_dma_buf_ops;
	exp_info.size = bp.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	buf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(buf->dmabuf)) {
		err = PTR_ERR(buf->dmabuf);
		fastrpc_buf_free(buf);
		return err;
	}

	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
	if (bp.fd < 0) {
		dma_buf_put(buf->dmabuf);
		return -EINVAL;
	}

	if (copy_to_user(argp, &bp, sizeof(bp))) {
		dma_buf_put(buf->dmabuf);
		return -EFAULT;
	}

	get_dma_buf(buf->dmabuf);

	return 0;
}

static int fastrpc_init_attach(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = 0;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;

	/* nscalars is truncated here to max supported value */
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}
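
/*
 * All ioctl payloads use fixed-size u32/u64 fields in the uapi structs, so
 * the layout is identical for 32-bit and 64-bit callers and .compat_ioctl
 * can share this handler.
 */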
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	case FASTRPC_IOCTL_FREE_DMA_BUFF:
		err = fastrpc_dmabuf_free(fl, argp);
		break;
	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
		err = fastrpc_dmabuf_alloc(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};

static int fastrpc_cb_probe(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sess;
	struct device *dev = &pdev->dev;
	unsigned long flags;
	u32 sessions = 0;
	int i, rc;

	cctx = dev_get_drvdata(dev->parent);
	if (!cctx)
		return -EINVAL;

	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

	spin_lock_irqsave(&cctx->lock, flags);
	sess = &cctx->session[cctx->sesscount];
	sess->used = false;
	sess->valid = true;
	sess->dev = dev;
	dev_set_drvdata(dev, sess);

	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
		dev_info(dev, "FastRPC Session ID not specified in DT\n");

	if (sessions > 0) {
		struct fastrpc_session_ctx *dup_sess;

		for (i = 1; i < sessions; i++) {
			if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
				break;
			dup_sess = &cctx->session[cctx->sesscount];
			memcpy(dup_sess, sess, sizeof(*dup_sess));
		}
	}
	cctx->sesscount++;
	spin_unlock_irqrestore(&cctx->lock, flags);
	rc = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "32-bit DMA enable failed\n");
		return rc;
	}

	return 0;
}

static int fastrpc_cb_remove(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
		if (cctx->session[i].sid == sess->sid) {
			cctx->session[i].valid = false;
			cctx->sesscount--;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,fastrpc-compute-cb", },
	{}
};

static struct platform_driver fastrpc_cb_driver = {
	.probe = fastrpc_cb_probe,
	.remove = fastrpc_cb_remove,
	.driver = {
		.name = "qcom,fastrpc-cb",
		.of_match_table = fastrpc_match_table,
		.suppress_bind_attrs = true,
	},
};
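
/*
 * Each DSP domain appears as one rpmsg device: probe resolves the domain
 * from the DT "label", registers /dev/fastrpc-<domain> and populates the
 * compute-bank child nodes, which then bind to fastrpc_cb_driver above.
 */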
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1;
	const char *domain;

	data = devm_kzalloc(rdev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain %s\n", domain);
		return -EINVAL;
	}

	data->miscdev.minor = MISC_DYNAMIC_MINOR;
	data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
				       domains[domain_id]);
	if (!data->miscdev.name)
		return -ENOMEM;

	data->miscdev.fops = &fastrpc_fops;

	dev_set_drvdata(&rpdev->dev, data);
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	/* Register the device node only once the channel state is set up */
	err = misc_register(&data->miscdev);
	if (err)
		return err;

	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
}

static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node)
		complete(&ctx->work);
	spin_unlock(&user->lock);
}

static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_user *user;
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	misc_deregister(&cctx->miscdev);
	of_platform_depopulate(&rpdev->dev);
}
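
/*
 * Response path: the DSP echoes the 64-bit context cookie built in
 * fastrpc_context_alloc() (IDR id << 4, with the PD id in the low bits).
 * Masking with FASTRPC_CTXID_MASK and shifting recovers the IDR id, which
 * is looked up to find the waiting context.
 */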
static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);

	/*
	 * The DMA buffer associated with the context cannot be freed in
	 * interrupt context so schedule it through a worker thread to
	 * avoid a kernel BUG.
	 */
	schedule_work(&ctx->put_work);

	return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};

static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
	platform_driver_unregister(&fastrpc_cb_driver);
	unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");
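
/*
 * Illustrative userspace sketch (not part of the driver): invoking a remote
 * method with one input and one output buffer. The ioctls and struct
 * layouts come from the uapi header included above; the device node name
 * follows the "fastrpc-%s" pattern registered in fastrpc_rpmsg_probe().
 * Method id 0, remote handle "h" and the buffers are hypothetical, and the
 * scalars word is built the same way as FASTRPC_SCALARS(0, 1, 1) above.
 *
 *	int fd = open("/dev/fastrpc-adsp", O_RDWR);
 *	struct fastrpc_invoke_args args[2] = {
 *		{ .ptr = (__u64)(uintptr_t)in,  .length = in_len,  .fd = -1 },
 *		{ .ptr = (__u64)(uintptr_t)out, .length = out_len, .fd = -1 },
 *	};
 *	struct fastrpc_invoke inv = {
 *		.handle = h,
 *		.sc = (0 << 24) | (1 << 16) | (1 << 8), // method 0, 1 in, 1 out
 *		.args = (__u64)(uintptr_t)args,
 *	};
 *
 *	ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH);	// attach to the DSP's PD
 *	ioctl(fd, FASTRPC_IOCTL_INVOKE, &inv);	// blocks until the DSP replies
 */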