// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 */

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/miscdevice.h>

#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_debugfs.h"

/* Printable ioctl names for trace output, indexed by _IOC_NR(cmd). */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

/* Keep the name table in lock-step with the ioctl number space. */
static_assert(ARRAY_SIZE(ioctl_names) == (VCHIQ_IOC_MAX + 1));

/* Destructor for the per-service user_service blob (plain kmalloc memory). */
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}

/*
 * Acknowledge that userspace has consumed a SERVICE_CLOSED completion:
 * drop the reference pinning the service and wake any thread blocked in
 * the close/remove path.  No-op if no close is pending.
 */
static void close_delivered(struct user_service *user_service)
{
	vchiq_log_info(vchiq_arm_log_level,
		       "%s(handle=%x)",
		       __func__, user_service->service->handle);

	if (user_service->close_pending) {
		/* Allow the underlying service to be culled */
		vchiq_service_put(user_service->service);

		/* Wake the user-thread blocked in close_ or remove_service */
		complete(&user_service->close_event);

		user_service->close_pending = 0;
	}
}

/* Cursor across a user-space scatter list, advanced by the copy callback. */
struct vchiq_io_copy_callback_context {
	struct vchiq_element *element;      /* current element */
	size_t element_offset;              /* bytes already consumed from it */
	unsigned long elements_to_go;       /* elements remaining (incl. current) */
};

/*
 * Copy callback handed to vchiq_queue_message(): drains up to @maxsize
 * bytes of user data into @dest, walking across element boundaries.
 * Returns the number of bytes copied, or -EFAULT on a faulting user read.
 */
static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
					   size_t offset, size_t maxsize)
{
	struct vchiq_io_copy_callback_context *cc = context;
	size_t total_bytes_copied = 0;
	size_t bytes_this_round;

	while (total_bytes_copied < maxsize) {
		if (!cc->elements_to_go)
			return total_bytes_copied;

		if (!cc->element->size) {
			/* Skip empty elements without copying anything. */
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
			continue;
		}

		bytes_this_round = min(cc->element->size - cc->element_offset,
				       maxsize - total_bytes_copied);

		if (copy_from_user(dest + total_bytes_copied,
				   cc->element->data + cc->element_offset,
				   bytes_this_round))
			return -EFAULT;

		cc->element_offset += bytes_this_round;
		total_bytes_copied += bytes_this_round;

		/* Element exhausted - move the cursor to the next one. */
		if (cc->element_offset == cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
		}
	}

	return maxsize;
}

/*
 * Queue a message assembled from @count user-space elements on @handle.
 * Returns 0 on success, -EFAULT for a NULL element with non-zero size,
 * -EIO or -EINTR mapped from the vchiq status.
 */
static int
vchiq_ioc_queue_message(unsigned int handle, struct vchiq_element *elements,
			unsigned long count)
{
	struct vchiq_io_copy_callback_context context;
	enum vchiq_status status = VCHIQ_SUCCESS;
	unsigned long i;
	size_t total_size = 0;

	context.element = elements;
	context.element_offset = 0;
	context.elements_to_go = count;

	/* Validate pointers and pre-compute the total payload size. */
	for (i = 0; i < count; i++) {
		if (!elements[i].data && elements[i].size != 0)
			return -EFAULT;

		total_size += elements[i].size;
	}

	status = vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
				     &context, total_size);

	if (status == VCHIQ_ERROR)
		return -EIO;
	else if (status == VCHIQ_RETRY)
		return -EINTR;
	return 0;
}

/*
 * Create (or open) a service on behalf of userspace and bind a freshly
 * allocated user_service to it as callback userdata.  On success
 * args->handle is filled in; user_service ownership passes to the
 * service (freed via user_service_free).
 */
static int vchiq_ioc_create_service(struct vchiq_instance *instance,
				    struct vchiq_create_service *args)
{
	struct user_service *user_service = NULL;
	struct vchiq_service *service;
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_service_params_kernel params;
	int srvstate;

	/* Opening a remote service requires an established connection. */
	if (args->is_open && !instance->connected)
		return -ENOTCONN;

	user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
	if (!user_service)
		return -ENOMEM;

	if (args->is_open) {
		srvstate = VCHIQ_SRVSTATE_OPENING;
	} else {
		srvstate = instance->connected ?
			VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN;
	}

	params = (struct vchiq_service_params_kernel) {
		.fourcc = args->params.fourcc,
		.callback = service_callback,
		.userdata = user_service,
		.version = args->params.version,
		.version_min = args->params.version_min,
	};
	service = vchiq_add_service_internal(instance->state, &params,
					     srvstate, instance,
					     user_service_free);
	if (!service) {
		kfree(user_service);
		return -EEXIST;
	}

	/* All fields set individually, so plain kmalloc above suffices. */
	user_service->service = service;
	user_service->userdata = args->params.userdata;
	user_service->instance = instance;
	user_service->is_vchi = (args->is_vchi != 0);
	user_service->dequeue_pending = 0;
	user_service->close_pending = 0;
	user_service->message_available_pos = instance->completion_remove - 1;
	user_service->msg_insert = 0;
	user_service->msg_remove = 0;
	init_completion(&user_service->insert_event);
	init_completion(&user_service->remove_event);
	init_completion(&user_service->close_event);

	if (args->is_open) {
		status = vchiq_open_service_internal(service, instance->pid);
		if (status != VCHIQ_SUCCESS) {
			/* remove_service tears down user_service too. */
			vchiq_remove_service(service->handle);
			return (status == VCHIQ_RETRY) ?
				-EINTR : -EIO;
		}
	}
	args->handle = service->handle;

	return 0;
}

/*
 * Pull one queued message for a VCHI-style service, optionally blocking
 * until one arrives, and copy its payload to the user buffer.
 */
static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
				     struct vchiq_dequeue_message *args)
{
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_header *header;
	int ret;

	DEBUG_INITIALISE(g_state.local);
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	user_service = (struct user_service *)service->base.userdata;
	if (user_service->is_vchi == 0) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock(&msg_queue_spinlock);
	if (user_service->msg_remove == user_service->msg_insert) {
		if (!args->blocking) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			ret = -EWOULDBLOCK;
			goto out;
		}
		user_service->dequeue_pending = 1;
		ret = 0;
		/* Sleep until a message is inserted or we are interrupted. */
		do {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			if (wait_for_completion_interruptible(&user_service->insert_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					       "DEQUEUE_MESSAGE interrupted");
				ret = -EINTR;
				break;
			}
			spin_lock(&msg_queue_spinlock);
		} while (user_service->msg_remove == user_service->msg_insert);

		/* On -EINTR the spinlock is already released by the break. */
		if (ret)
			goto out;
	}

	if (WARN_ON_ONCE((int)(user_service->msg_insert -
			       user_service->msg_remove) < 0)) {
		spin_unlock(&msg_queue_spinlock);
		ret = -EINVAL;
		goto out;
	}

	header = user_service->msg_queue[user_service->msg_remove &
					 (MSG_QUEUE_SIZE - 1)];
	user_service->msg_remove++;
	spin_unlock(&msg_queue_spinlock);

	/* Wake the slot handler in case the queue was full. */
	complete(&user_service->remove_event);
	if (!header) {
		ret = -ENOTCONN;
	} else if (header->size <= args->bufsize) {
		/* Copy to user space if msgbuf is not NULL */
		if (!args->buf || (copy_to_user(args->buf, header->data, header->size) == 0)) {
			ret =
			       header->size;
			vchiq_release_message(service->handle, header);
		} else {
			ret = -EFAULT;
		}
	} else {
		vchiq_log_error(vchiq_arm_log_level,
				"header %pK: bufsize %x < size %x",
				header, args->bufsize, header->size);
		WARN(1, "invalid size\n");
		ret = -EMSGSIZE;
	}
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
out:
	vchiq_service_put(service);
	return ret;
}

/*
 * Common handler for the QUEUE_BULK_TRANSMIT/RECEIVE ioctls.  For
 * BLOCKING mode a bulk_waiter is allocated; if the wait is interrupted
 * the waiter is parked on the instance's list so a later WAITING-mode
 * call from the same pid can resume it (mode written back via @mode).
 */
static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
				      struct vchiq_queue_bulk_transfer *args,
				      enum vchiq_bulk_dir dir,
				      enum vchiq_bulk_mode __user *mode)
{
	struct vchiq_service *service;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;
	void *userdata;
	int status = 0;
	int ret;

	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	if (args->mode == VCHIQ_BULK_MODE_BLOCKING) {
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			ret = -ENOMEM;
			goto out;
		}

		userdata = &waiter->bulk_waiter;
	} else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
		/* Resume a waiter parked by an earlier interrupted call. */
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_for_each_entry(waiter, &instance->bulk_waiter_list,
				    list) {
			if (waiter->pid == current->pid) {
				list_del(&waiter->list);
				found = true;
				break;
			}
		}
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		if (!found) {
			vchiq_log_error(vchiq_arm_log_level,
					"no bulk_waiter found for pid %d", current->pid);
			ret = -ESRCH;
			goto out;
		}
		vchiq_log_info(vchiq_arm_log_level,
			       "found bulk_waiter %pK for pid %d", waiter, current->pid);
		userdata = &waiter->bulk_waiter;
	} else {
		userdata = args->userdata;
	}

	status = vchiq_bulk_transfer(args->handle, NULL, args->data, args->size,
				     userdata, args->mode, dir);

	/* Non-blocking modes: nothing more to clean up here. */
	if (!waiter) {
		ret = 0;
		goto out;
	}

	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
	    !waiter->bulk_waiter.bulk) {
		if (waiter->bulk_waiter.bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			waiter->bulk_waiter.bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
		ret = 0;
	} else {
		/*
		 * Interrupted while the bulk is still in flight: park the
		 * waiter and tell userspace to retry in WAITING mode.
		 */
		const enum vchiq_bulk_mode mode_waiting =
			VCHIQ_BULK_MODE_WAITING;
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
			       "saved bulk_waiter %pK for pid %d", waiter, current->pid);

		ret = put_user(mode_waiting, mode);
	}
out:
	vchiq_service_put(service);
	if (ret)
		return ret;
	else if (status == VCHIQ_ERROR)
		return -EIO;
	else if (status == VCHIQ_RETRY)
		return -EINTR;
	return 0;
}

/* read a user pointer value from an array pointers in user space */
static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index)
{
	int ret;

	if (in_compat_syscall()) {
		/* 32-bit caller: the array holds compat (32-bit) pointers. */
		compat_uptr_t ptr32;
		compat_uptr_t __user *uptr = ubuf;

		ret = get_user(ptr32, uptr + index);
		if (ret)
			return ret;

		*buf = compat_ptr(ptr32);
	} else {
		uintptr_t ptr, __user *uptr = ubuf;

		ret = get_user(ptr, uptr + index);

		if (ret)
			return ret;

		*buf = (void __user *)ptr;
	}

	return 0;
}

/* 32-bit ABI layout of a completion record (pointers as compat_uptr_t). */
struct vchiq_completion_data32 {
	enum vchiq_reason reason;
	compat_uptr_t header;
	compat_uptr_t service_userdata;
	compat_uptr_t bulk_userdata;
};

/*
 * Write one completion record to the user buffer at @index, using the
 * 32-bit layout for compat callers and the native layout otherwise.
 */
static int vchiq_put_completion(struct vchiq_completion_data __user *buf,
				struct vchiq_completion_data *completion,
				int index)
{
	struct vchiq_completion_data32 __user *buf32 = (void __user *)buf;

	if (in_compat_syscall()) {
		struct vchiq_completion_data32 tmp = {
			.reason = completion->reason,
			.header = ptr_to_compat(completion->header),
			.service_userdata = ptr_to_compat(completion->service_userdata),
			.bulk_userdata = ptr_to_compat(completion->bulk_userdata),
		};
		if (copy_to_user(&buf32[index], &tmp, sizeof(tmp)))
			return -EFAULT;
	} else {
		if (copy_to_user(&buf[index], completion, sizeof(*completion)))
			return -EFAULT;
	}

	return 0;
}

/*
 * Block until completions are available (unless closing), then copy up
 * to args->count of them - and their message payloads - to userspace.
 * Returns the number of completions delivered, or a negative errno if
 * nothing was delivered before the error.
 */
static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
				      struct vchiq_await_completion *args,
				      int __user *msgbufcountp)
{
	int msgbufcount;
	int remove;
	int ret;

	DEBUG_INITIALISE(g_state.local);

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	if (!instance->connected)
		return -ENOTCONN;

	mutex_lock(&instance->completion_mutex);

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	while ((instance->completion_remove == instance->completion_insert) && !instance->closing) {
		int rc;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		/* Drop the mutex while sleeping so producers can insert. */
		mutex_unlock(&instance->completion_mutex);
		rc = wait_for_completion_interruptible(&instance->insert_event);
		mutex_lock(&instance->completion_mutex);
		if (rc) {
			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			vchiq_log_info(vchiq_arm_log_level,
				       "AWAIT_COMPLETION interrupted");
			ret = -EINTR;
			goto out;
		}
	}
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	msgbufcount = args->msgbufcount;
	remove = instance->completion_remove;

	/* ret doubles as the count of completions delivered so far. */
	for (ret = 0; ret < args->count; ret++) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_completion_data user_completion;
		struct vchiq_service *service;
		struct user_service *user_service;
		struct vchiq_header *header;

		if (remove == instance->completion_insert)
			break;

		completion = &instance->completions[remove & (MAX_COMPLETIONS - 1)];

		/*
		 * A read memory barrier is needed to stop
		 * prefetch of a stale completion record
		 */
		rmb();

		service = completion->service_userdata;
		user_service = service->base.userdata;

		memset(&user_completion, 0, sizeof(user_completion));
		user_completion = (struct vchiq_completion_data) {
			.reason = completion->reason,
			.service_userdata = user_service->userdata,
		};

		header = completion->header;
		if (header) {
			void __user *msgbuf;
			int msglen;

			msglen = header->size + sizeof(struct vchiq_header);
			/* This must be a VCHIQ-style service */
			if (args->msgbufsize < msglen) {
				vchiq_log_error(vchiq_arm_log_level,
						"header %pK: msgbufsize %x < msglen %x",
						header, args->msgbufsize, msglen);
				WARN(1, "invalid message size\n");
				if (ret == 0)
					ret = -EMSGSIZE;
				break;
			}
			if (msgbufcount <= 0)
				/* Stall here for lack of a buffer for the message. */
				break;
			/* Get the pointer from user space */
			msgbufcount--;
			if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
					       msgbufcount)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Copy the message to user space */
			if (copy_to_user(msgbuf, header, msglen)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Now it has been copied, the message can be released. */
			vchiq_release_message(service->handle, header);

			/* The completion must point to the msgbuf. */
			user_completion.header = msgbuf;
		}

		if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
		    !instance->use_close_delivered)
			vchiq_service_put(service);

		/*
		 * FIXME: address space mismatch, does bulk_userdata
		 * actually point to user or kernel memory?
		 */
		user_completion.bulk_userdata = completion->bulk_userdata;

		if (vchiq_put_completion(args->buf, &user_completion, ret)) {
			if (ret == 0)
				ret = -EFAULT;
			break;
		}

		/*
		 * Ensure that the above copy has completed
		 * before advancing the remove pointer.
		 */
		mb();
		remove++;
		instance->completion_remove = remove;
	}

	/* Report back how many message buffers remain unconsumed. */
	if (msgbufcount != args->msgbufcount) {
		if (put_user(msgbufcount, msgbufcountp))
			ret = -EFAULT;
	}
out:
	if (ret)
		complete(&instance->remove_event);
	mutex_unlock(&instance->completion_mutex);
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	return ret;
}

/*
 * Main ioctl dispatcher for /dev/vchiq.  Most cases copy an argument
 * struct from userspace and delegate to a vchiq_ioc_* helper; the
 * shared `status` is mapped to -EIO/-EINTR at the end when `ret` is
 * still 0, and any `service` reference taken by a case is dropped.
 */
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct vchiq_instance *instance = file->private_data;
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_service *service = NULL;
	long ret = 0;
	int i, rc;

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - instance %pK, cmd %s, arg %lx", __func__, instance,
			((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
			ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
							   instance, &i))) {
			status = vchiq_remove_service(service->handle);
			vchiq_service_put(service);
			if (status != VCHIQ_SUCCESS)
				break;
		}
		service = NULL;

		if (status == VCHIQ_SUCCESS) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			complete(&instance->insert_event);
		}

		break;

	case VCHIQ_IOC_CONNECT:
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = mutex_lock_killable(&instance->state->mutex);
		if (rc) {
			vchiq_log_error(vchiq_arm_log_level,
					"vchiq: connect: could not lock mutex for state %d: %d",
					instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		mutex_unlock(&instance->state->mutex);

		if (status == VCHIQ_SUCCESS)
			instance->connected = 1;
		else
			vchiq_log_error(vchiq_arm_log_level,
					"vchiq: could not connect: %d", status);
		break;

	case VCHIQ_IOC_CREATE_SERVICE: {
		struct vchiq_create_service __user *argp;
		struct vchiq_create_service args;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_create_service(instance, &args);
		if (ret < 0)
			break;

		/* Roll back if userspace never learns the handle. */
		if (put_user(args.handle, &argp->handle)) {
			vchiq_remove_service(args.handle);
			ret = -EFAULT;
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE:
	case VCHIQ_IOC_REMOVE_SERVICE: {
		unsigned int handle = (unsigned int)arg;
		struct user_service *user_service;

		service = find_service_for_instance(instance, handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		user_service = service->base.userdata;

		/*
		 * close_pending is false on first entry, and when the
		 * wait in vchiq_close_service has been interrupted.
		 */
		if (!user_service->close_pending) {
			status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
				 vchiq_close_service(service->handle) :
				 vchiq_remove_service(service->handle);
			if (status != VCHIQ_SUCCESS)
				break;
		}

		/*
		 * close_pending is true once the underlying service
		 * has been closed until the client library calls the
		 * CLOSE_DELIVERED ioctl, signalling close_event.
		 */
		if (user_service->close_pending &&
		    wait_for_completion_interruptible(&user_service->close_event))
			status = VCHIQ_RETRY;
		break;
	}

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE: {
		unsigned int handle = (unsigned int)arg;

		service = find_service_for_instance(instance, handle);
		if (service) {
			ret = (cmd == VCHIQ_IOC_USE_SERVICE) ?
				vchiq_use_service_internal(service) :
				vchiq_release_service_internal(service);
			if (ret) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s: cmd %s returned error %ld for service %c%c%c%c:%03d",
						__func__, (cmd == VCHIQ_IOC_USE_SERVICE) ?
						"VCHIQ_IOC_USE_SERVICE" :
						"VCHIQ_IOC_RELEASE_SERVICE",
						ret,
						VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
						service->client_id);
			}
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		struct vchiq_queue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);

		if (service && (args.count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			struct vchiq_element elements[MAX_ELEMENTS];

			if (copy_from_user(elements, args.elements,
					   args.count * sizeof(struct vchiq_element)) == 0)
				ret = vchiq_ioc_queue_message(args.handle, elements,
							      args.count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		struct vchiq_queue_bulk_transfer args;
		struct vchiq_queue_bulk_transfer __user *argp;

		enum vchiq_bulk_dir dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_irq_queue_bulk_tx_rx(instance, &args,
						 dir, &argp->mode);
	} break;

	case VCHIQ_IOC_AWAIT_COMPLETION: {
		struct vchiq_await_completion args;
		struct vchiq_await_completion __user *argp;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_await_completion(instance, &args,
						 &argp->msgbufcount);
	} break;

	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		struct vchiq_dequeue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_dequeue_message(instance, &args);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		unsigned int handle = (unsigned int)arg;

		ret = vchiq_get_client_id(handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		struct vchiq_get_config args;
		struct vchiq_config config;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}
		/* Caller may request a prefix of the config struct only. */
		if (args.config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}

		vchiq_get_config(&config);
		if (copy_to_user(args.pconfig, &config, args.config_size)) {
			ret = -EFAULT;
			break;
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		struct vchiq_set_service_option args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		ret = vchiq_set_service_option(args.handle, args.option,
					       args.value);
	} break;

	case VCHIQ_IOC_LIB_VERSION: {
		unsigned int lib_version = (unsigned int)arg;

		if (lib_version < VCHIQ_VERSION_MIN)
			ret = -EINVAL;
		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
			instance->use_close_delivered = 1;
	} break;

	case VCHIQ_IOC_CLOSE_DELIVERED: {
		unsigned int handle = (unsigned int)arg;

		service = find_closed_service_for_instance(instance, handle);
		if (service) {
			struct user_service *user_service =
				(struct user_service *)service->base.userdata;
			close_delivered(user_service);
		} else {
			ret = -EINVAL;
		}
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	if (service)
		vchiq_service_put(service);

	/* Map vchiq status to errno only if no errno was set already. */
	if (ret == 0) {
		if (status == VCHIQ_ERROR)
			ret = -EIO;
		else if (status == VCHIQ_RETRY)
			ret = -EINTR;
	}

	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			       " ioctl instance %pK, cmd %s -> status %d, %ld",
			       instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
			       ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
				" ioctl instance %pK, cmd %s -> status %d, %ld",
				instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
881 ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret); 882 883 return ret; 884 } 885 886 #if defined(CONFIG_COMPAT) 887 888 struct vchiq_service_params32 { 889 int fourcc; 890 compat_uptr_t callback; 891 compat_uptr_t userdata; 892 short version; /* Increment for non-trivial changes */ 893 short version_min; /* Update for incompatible changes */ 894 }; 895 896 struct vchiq_create_service32 { 897 struct vchiq_service_params32 params; 898 int is_open; 899 int is_vchi; 900 unsigned int handle; /* OUT */ 901 }; 902 903 #define VCHIQ_IOC_CREATE_SERVICE32 \ 904 _IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32) 905 906 static long 907 vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd, 908 struct vchiq_create_service32 __user *ptrargs32) 909 { 910 struct vchiq_create_service args; 911 struct vchiq_create_service32 args32; 912 long ret; 913 914 if (copy_from_user(&args32, ptrargs32, sizeof(args32))) 915 return -EFAULT; 916 917 args = (struct vchiq_create_service) { 918 .params = { 919 .fourcc = args32.params.fourcc, 920 .callback = compat_ptr(args32.params.callback), 921 .userdata = compat_ptr(args32.params.userdata), 922 .version = args32.params.version, 923 .version_min = args32.params.version_min, 924 }, 925 .is_open = args32.is_open, 926 .is_vchi = args32.is_vchi, 927 .handle = args32.handle, 928 }; 929 930 ret = vchiq_ioc_create_service(file->private_data, &args); 931 if (ret < 0) 932 return ret; 933 934 if (put_user(args.handle, &ptrargs32->handle)) { 935 vchiq_remove_service(args.handle); 936 return -EFAULT; 937 } 938 939 return 0; 940 } 941 942 struct vchiq_element32 { 943 compat_uptr_t data; 944 unsigned int size; 945 }; 946 947 struct vchiq_queue_message32 { 948 unsigned int handle; 949 unsigned int count; 950 compat_uptr_t elements; 951 }; 952 953 #define VCHIQ_IOC_QUEUE_MESSAGE32 \ 954 _IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message32) 955 956 static long 957 vchiq_compat_ioctl_queue_message(struct file *file, 958 unsigned int 
cmd, 959 struct vchiq_queue_message32 __user *arg) 960 { 961 struct vchiq_queue_message args; 962 struct vchiq_queue_message32 args32; 963 struct vchiq_service *service; 964 int ret; 965 966 if (copy_from_user(&args32, arg, sizeof(args32))) 967 return -EFAULT; 968 969 args = (struct vchiq_queue_message) { 970 .handle = args32.handle, 971 .count = args32.count, 972 .elements = compat_ptr(args32.elements), 973 }; 974 975 if (args32.count > MAX_ELEMENTS) 976 return -EINVAL; 977 978 service = find_service_for_instance(file->private_data, args.handle); 979 if (!service) 980 return -EINVAL; 981 982 if (args32.elements && args32.count) { 983 struct vchiq_element32 element32[MAX_ELEMENTS]; 984 struct vchiq_element elements[MAX_ELEMENTS]; 985 unsigned int count; 986 987 if (copy_from_user(&element32, args.elements, 988 sizeof(element32))) { 989 vchiq_service_put(service); 990 return -EFAULT; 991 } 992 993 for (count = 0; count < args32.count; count++) { 994 elements[count].data = 995 compat_ptr(element32[count].data); 996 elements[count].size = element32[count].size; 997 } 998 ret = vchiq_ioc_queue_message(args.handle, elements, 999 args.count); 1000 } else { 1001 ret = -EINVAL; 1002 } 1003 vchiq_service_put(service); 1004 1005 return ret; 1006 } 1007 1008 struct vchiq_queue_bulk_transfer32 { 1009 unsigned int handle; 1010 compat_uptr_t data; 1011 unsigned int size; 1012 compat_uptr_t userdata; 1013 enum vchiq_bulk_mode mode; 1014 }; 1015 1016 #define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \ 1017 _IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32) 1018 #define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \ 1019 _IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32) 1020 1021 static long 1022 vchiq_compat_ioctl_queue_bulk(struct file *file, 1023 unsigned int cmd, 1024 struct vchiq_queue_bulk_transfer32 __user *argp) 1025 { 1026 struct vchiq_queue_bulk_transfer32 args32; 1027 struct vchiq_queue_bulk_transfer args; 1028 enum vchiq_bulk_dir dir = (cmd == 
VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ? 1029 VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE; 1030 1031 if (copy_from_user(&args32, argp, sizeof(args32))) 1032 return -EFAULT; 1033 1034 args = (struct vchiq_queue_bulk_transfer) { 1035 .handle = args32.handle, 1036 .data = compat_ptr(args32.data), 1037 .size = args32.size, 1038 .userdata = compat_ptr(args32.userdata), 1039 .mode = args32.mode, 1040 }; 1041 1042 return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args, 1043 dir, &argp->mode); 1044 } 1045 1046 struct vchiq_await_completion32 { 1047 unsigned int count; 1048 compat_uptr_t buf; 1049 unsigned int msgbufsize; 1050 unsigned int msgbufcount; /* IN/OUT */ 1051 compat_uptr_t msgbufs; 1052 }; 1053 1054 #define VCHIQ_IOC_AWAIT_COMPLETION32 \ 1055 _IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32) 1056 1057 static long 1058 vchiq_compat_ioctl_await_completion(struct file *file, 1059 unsigned int cmd, 1060 struct vchiq_await_completion32 __user *argp) 1061 { 1062 struct vchiq_await_completion args; 1063 struct vchiq_await_completion32 args32; 1064 1065 if (copy_from_user(&args32, argp, sizeof(args32))) 1066 return -EFAULT; 1067 1068 args = (struct vchiq_await_completion) { 1069 .count = args32.count, 1070 .buf = compat_ptr(args32.buf), 1071 .msgbufsize = args32.msgbufsize, 1072 .msgbufcount = args32.msgbufcount, 1073 .msgbufs = compat_ptr(args32.msgbufs), 1074 }; 1075 1076 return vchiq_ioc_await_completion(file->private_data, &args, 1077 &argp->msgbufcount); 1078 } 1079 1080 struct vchiq_dequeue_message32 { 1081 unsigned int handle; 1082 int blocking; 1083 unsigned int bufsize; 1084 compat_uptr_t buf; 1085 }; 1086 1087 #define VCHIQ_IOC_DEQUEUE_MESSAGE32 \ 1088 _IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32) 1089 1090 static long 1091 vchiq_compat_ioctl_dequeue_message(struct file *file, 1092 unsigned int cmd, 1093 struct vchiq_dequeue_message32 __user *arg) 1094 { 1095 struct vchiq_dequeue_message32 args32; 1096 struct vchiq_dequeue_message args; 1097 
1098 if (copy_from_user(&args32, arg, sizeof(args32))) 1099 return -EFAULT; 1100 1101 args = (struct vchiq_dequeue_message) { 1102 .handle = args32.handle, 1103 .blocking = args32.blocking, 1104 .bufsize = args32.bufsize, 1105 .buf = compat_ptr(args32.buf), 1106 }; 1107 1108 return vchiq_ioc_dequeue_message(file->private_data, &args); 1109 } 1110 1111 struct vchiq_get_config32 { 1112 unsigned int config_size; 1113 compat_uptr_t pconfig; 1114 }; 1115 1116 #define VCHIQ_IOC_GET_CONFIG32 \ 1117 _IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32) 1118 1119 static long 1120 vchiq_compat_ioctl_get_config(struct file *file, 1121 unsigned int cmd, 1122 struct vchiq_get_config32 __user *arg) 1123 { 1124 struct vchiq_get_config32 args32; 1125 struct vchiq_config config; 1126 void __user *ptr; 1127 1128 if (copy_from_user(&args32, arg, sizeof(args32))) 1129 return -EFAULT; 1130 if (args32.config_size > sizeof(config)) 1131 return -EINVAL; 1132 1133 vchiq_get_config(&config); 1134 ptr = compat_ptr(args32.pconfig); 1135 if (copy_to_user(ptr, &config, args32.config_size)) 1136 return -EFAULT; 1137 1138 return 0; 1139 } 1140 1141 static long 1142 vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1143 { 1144 void __user *argp = compat_ptr(arg); 1145 1146 switch (cmd) { 1147 case VCHIQ_IOC_CREATE_SERVICE32: 1148 return vchiq_compat_ioctl_create_service(file, cmd, argp); 1149 case VCHIQ_IOC_QUEUE_MESSAGE32: 1150 return vchiq_compat_ioctl_queue_message(file, cmd, argp); 1151 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32: 1152 case VCHIQ_IOC_QUEUE_BULK_RECEIVE32: 1153 return vchiq_compat_ioctl_queue_bulk(file, cmd, argp); 1154 case VCHIQ_IOC_AWAIT_COMPLETION32: 1155 return vchiq_compat_ioctl_await_completion(file, cmd, argp); 1156 case VCHIQ_IOC_DEQUEUE_MESSAGE32: 1157 return vchiq_compat_ioctl_dequeue_message(file, cmd, argp); 1158 case VCHIQ_IOC_GET_CONFIG32: 1159 return vchiq_compat_ioctl_get_config(file, cmd, argp); 1160 default: 1161 return vchiq_ioctl(file, 
				   cmd, (unsigned long)argp);
	}
}

#endif

/*
 * Open /dev/vchiq: allocate and initialise a per-file vchiq_instance
 * bound to the global vchiq state.
 */
static int vchiq_open(struct inode *inode, struct file *file)
{
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_instance *instance;

	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");

	if (!state) {
		vchiq_log_error(vchiq_arm_log_level,
				"vchiq has no connection to VideoCore");
		return -ENOTCONN;
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance)
		return -ENOMEM;

	instance->state = state;
	/* Track the opening process (thread-group id) for waiter lookup. */
	instance->pid = current->tgid;

	vchiq_debugfs_add_instance(instance);

	init_completion(&instance->insert_event);
	init_completion(&instance->remove_event);
	mutex_init(&instance->completion_mutex);
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	file->private_data = instance;

	return 0;
}

/*
 * Release /dev/vchiq: stop the completion consumer, then terminate and
 * reap every service owned by this instance (continues past this view).
 */
static int vchiq_release(struct inode *inode, struct file *file)
{
	struct vchiq_instance *instance = file->private_data;
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_service *service;
	int ret = 0;
	int i;

	vchiq_log_info(vchiq_arm_log_level, "%s: instance=%lx", __func__,
		       (unsigned long)instance);

	if (!state) {
		ret = -EPERM;
		goto out;
	}

	/* Ensure videocore is awake to allow termination. */
	vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);

	mutex_lock(&instance->completion_mutex);

	/* Wake the completion thread and ask it to exit */
	instance->closing = 1;
	complete(&instance->insert_event);

	mutex_unlock(&instance->completion_mutex);

	/* Wake the slot handler if the completion queue is full. */
	complete(&instance->remove_event);

	/* Mark all services for termination...
 */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		/* Wake the slot handler if the msg queue is full. */
		complete(&user_service->remove_event);

		vchiq_terminate_service_internal(service);
		/* Drop the reference taken by next_service_by_instance(). */
		vchiq_service_put(service);
	}

	/* ...and wait for them to die */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		wait_for_completion(&service->remove_event);

		/*
		 * The service should now be free; if it is not, bail out
		 * rather than touch a service in an unexpected state.
		 */
		if (WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE)) {
			vchiq_service_put(service);
			break;
		}

		spin_lock(&msg_queue_spinlock);

		/* Release every message still queued for this service. */
		while (user_service->msg_remove != user_service->msg_insert) {
			struct vchiq_header *header;
			int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);

			header = user_service->msg_queue[m];
			user_service->msg_remove++;
			/*
			 * Drop the spinlock around vchiq_release_message();
			 * msg_remove has already been advanced, so the slot
			 * cannot be consumed twice.
			 */
			spin_unlock(&msg_queue_spinlock);

			if (header)
				vchiq_release_message(service->handle, header);
			spin_lock(&msg_queue_spinlock);
		}

		spin_unlock(&msg_queue_spinlock);

		vchiq_service_put(service);
	}

	/* Release any closed services */
	while (instance->completion_remove != instance->completion_insert) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_service *service;

		completion = &instance->completions[instance->completion_remove
			& (MAX_COMPLETIONS - 1)];
		service = completion->service_userdata;
		if (completion->reason == VCHIQ_SERVICE_CLOSED) {
			struct user_service *user_service =
							service->base.userdata;

			/* Wake any blocked user-thread */
			if (instance->use_close_delivered)
				complete(&user_service->close_event);
			/* Drop the reference held by the queued completion. */
			vchiq_service_put(service);
		}
		instance->completion_remove++;
	}

	/* Release the PEER service count.
 */
	vchiq_release_internal(instance->state, NULL);

	free_bulk_waiter(instance);

	vchiq_debugfs_remove_instance(instance);

	kfree(instance);
	file->private_data = NULL;

out:
	return ret;
}

/*
 * read() handler for /dev/vchiq: dump the global vchiq state into the
 * user's buffer for debugging.  Advances *ppos by the number of bytes
 * actually produced, so repeated reads page through the dump.
 */
static ssize_t
vchiq_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct dump_context context;
	int err;

	context.buf = buf;
	context.actual = 0;
	context.space = count;
	context.offset = *ppos;

	err = vchiq_dump_state(&context, &g_state);
	if (err)
		return err;

	*ppos += context.actual;

	return context.actual;
}

static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vchiq_compat_ioctl,
#endif
	.open = vchiq_open,
	.release = vchiq_release,
	.read = vchiq_read
};

/* Misc device node: /dev/vchiq with a dynamically assigned minor. */
static struct miscdevice vchiq_miscdev = {
	.fops = &vchiq_fops,
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vchiq",

};

/**
 * vchiq_register_chrdev - Register the char driver for vchiq
 *			   and create the necessary class and
 *			   device files in userspace.
 * @parent:	The parent of the char device.
 *
 * Returns 0 on success else returns the error code.
 */
int vchiq_register_chrdev(struct device *parent)
{
	vchiq_miscdev.parent = parent;

	return misc_register(&vchiq_miscdev);
}

/**
 * vchiq_deregister_chrdev - Deregister and cleanup the vchiq char
 *			     driver and device files
 */
void vchiq_deregister_chrdev(void)
{
	misc_deregister(&vchiq_miscdev);
}