1 /** 2 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. 3 * Copyright (c) 2010-2012 Broadcom. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. The names of the above-listed copyright holders may not be used 15 * to endorse or promote products derived from this software without 16 * specific prior written permission. 17 * 18 * ALTERNATIVELY, this software may be distributed under the terms of the 19 * GNU General Public License ("GPL") version 2, as published by the Free 20 * Software Foundation. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 23 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 27 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 29 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 30 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 31 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35 #include <linux/kernel.h> 36 #include <linux/module.h> 37 #include <linux/sched/signal.h> 38 #include <linux/types.h> 39 #include <linux/errno.h> 40 #include <linux/cdev.h> 41 #include <linux/fs.h> 42 #include <linux/device.h> 43 #include <linux/mm.h> 44 #include <linux/highmem.h> 45 #include <linux/pagemap.h> 46 #include <linux/bug.h> 47 #include <linux/semaphore.h> 48 #include <linux/list.h> 49 #include <linux/of.h> 50 #include <linux/platform_device.h> 51 #include <linux/compat.h> 52 #include <soc/bcm2835/raspberrypi-firmware.h> 53 54 #include "vchiq_core.h" 55 #include "vchiq_ioctl.h" 56 #include "vchiq_arm.h" 57 #include "vchiq_debugfs.h" 58 #include "vchiq_killable.h" 59 60 #define DEVICE_NAME "vchiq" 61 62 /* Override the default prefix, which would be vchiq_arm (from the filename) */ 63 #undef MODULE_PARAM_PREFIX 64 #define MODULE_PARAM_PREFIX DEVICE_NAME "." 65 66 #define VCHIQ_MINOR 0 67 68 /* Some per-instance constants */ 69 #define MAX_COMPLETIONS 128 70 #define MAX_SERVICES 64 71 #define MAX_ELEMENTS 8 72 #define MSG_QUEUE_SIZE 128 73 74 #define KEEPALIVE_VER 1 75 #define KEEPALIVE_VER_MIN KEEPALIVE_VER 76 77 /* Run time control of log level, based on KERN_XXX level. 
*/
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;

#define SUSPEND_TIMER_TIMEOUT_MS 100
#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000

#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
static const char *const suspend_state_names[] = {
	"VC_SUSPEND_FORCE_CANCELED",
	"VC_SUSPEND_REJECTED",
	"VC_SUSPEND_FAILED",
	"VC_SUSPEND_IDLE",
	"VC_SUSPEND_REQUESTED",
	"VC_SUSPEND_IN_PROGRESS",
	"VC_SUSPEND_SUSPENDED"
};
#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
static const char *const resume_state_names[] = {
	"VC_RESUME_FAILED",
	"VC_RESUME_IDLE",
	"VC_RESUME_REQUESTED",
	"VC_RESUME_IN_PROGRESS",
	"VC_RESUME_RESUMED"
};
/* The number of times we allow force suspend to timeout before actually
** _forcing_ suspend. This is to cater for SW which fails to release vchiq
** correctly - we don't want to prevent ARM suspend indefinitely in this case.
*/
#define FORCE_SUSPEND_FAIL_MAX 8

/* The time in ms allowed for videocore to go idle when force suspend has been
 * requested */
#define FORCE_SUSPEND_TIMEOUT_MS 200

static void suspend_timer_callback(struct timer_list *t);

/* Per-service state attached to a service created through the ioctl
 * interface.  A pointer to this record is stored in the underlying
 * VCHIQ_SERVICE_T's base.userdata, so service_callback() can route
 * events back to the owning instance's completion queue. */
typedef struct user_service_struct {
	VCHIQ_SERVICE_T *service;	/* underlying core service */
	void *userdata;			/* opaque pointer supplied by the client,
					 * restored into completion records */
	VCHIQ_INSTANCE_T instance;	/* owning per-open-file instance */
	char is_vchi;			/* non-zero: messages are buffered in
					 * msg_queue for DEQUEUE_MESSAGE */
	char dequeue_pending;		/* a thread is waiting in DEQUEUE_MESSAGE */
	char close_pending;		/* CLOSED notification delivered but not yet
					 * acknowledged via CLOSE_DELIVERED */
	int message_available_pos;	/* completion-queue position of the last
					 * MESSAGE_AVAILABLE for this service */
	/* Free-running ring indices into msg_queue; masked with
	 * (MSG_QUEUE_SIZE - 1) on access. */
	int msg_insert;
	int msg_remove;
	struct semaphore insert_event;	/* signalled when a message is queued */
	struct semaphore remove_event;	/* signalled when a message is dequeued */
	struct semaphore close_event;	/* signalled by close_delivered() */
	VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
} USER_SERVICE_T;

/* Records a blocking bulk transfer's waiter so a later
 * VCHIQ_BULK_MODE_WAITING request from the same pid can retrieve it. */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;
	struct list_head list;
};

/* Per-open-file state: one instance is created for each open of the
 * /dev/vchiq device node. */
struct vchiq_instance_struct {
	VCHIQ_STATE_T *state;
	/* Completion ring buffer shared between service_callback (producer)
	 * and the AWAIT_COMPLETION ioctl (consumer).  The indices are
	 * free-running and masked with (MAX_COMPLETIONS - 1). */
	VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
	int completion_insert;
	int completion_remove;
	struct semaphore insert_event;
	struct semaphore remove_event;
	struct mutex completion_mutex;

	int connected;
	int closing;
	int pid;
	int mark;
	int use_close_delivered;	/* client library understands the
					 * CLOSE_DELIVERED handshake */
	int trace;

	/* Waiters parked by blocking bulk transfers, keyed by pid. */
	struct list_head bulk_waiter_list;
	struct mutex bulk_waiter_list_mutex;

	VCHIQ_DEBUGFS_NODE_T debugfs_node;
};

/* State threaded through the debugfs/proc state-dump helpers. */
typedef struct dump_context_struct {
	char __user *buf;
	size_t actual;
	size_t space;
	loff_t offset;
} DUMP_CONTEXT_T;

static struct cdev vchiq_cdev;
static dev_t vchiq_devid;
static VCHIQ_STATE_T g_state;
static struct class *vchiq_class;
static struct device *vchiq_dev;
static DEFINE_SPINLOCK(msg_queue_spinlock);

/* Indexed by _IOC_NR(cmd); order must match the VCHIQ_IOC_* numbering
 * (checked by the static assert below). */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
		    (VCHIQ_IOC_MAX + 1));

/****************************************************************************
*
*   add_completion
*
***************************************************************************/

/* Append one completion record to the instance's completion ring.
 *
 * If the ring is full, blocks interruptibly on remove_event until the
 * consumer (AWAIT_COMPLETION) frees a slot.  Returns VCHIQ_RETRY if the
 * wait is interrupted by a signal, VCHIQ_SUCCESS otherwise (including the
 * case where the instance is closing, in which case the record is
 * silently dropped). */
static VCHIQ_STATUS_T
add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
	void *bulk_userdata)
{
	VCHIQ_COMPLETION_DATA_T *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local)

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"add_completion - completion queue full");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (down_interruptible(&instance->remove_event) != 0) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
		record is written out before the insert point.  Pairs with the
		rmb() in the AWAIT_COMPLETION reader. */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	up(&instance->insert_event);

	return VCHIQ_SUCCESS;
}

/****************************************************************************
*
*   service_callback
*
***************************************************************************/

/* Callback registered with the VCHIQ core for every service created via
 * the ioctl interface.  Routes events into the owning instance's
 * completion ring; for vchi-style services, message headers are instead
 * buffered in the per-service msg_queue so DEQUEUE_MESSAGE can consume
 * them, and the completion queue may be bypassed entirely. */
static VCHIQ_STATUS_T
service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a USER_SERVICE_T record containing
	** the original callback and the user state structure, which contains a
	** circular buffer for completion records.
	*/
	USER_SERVICE_T *user_service;
	VCHIQ_SERVICE_T *service;
	VCHIQ_INSTANCE_T instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (USER_SERVICE_T *)service->base.userdata;
	instance = user_service->instance;

	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"service_callback - service %lx(%d,%p), reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		(unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			/* Message ring full - drop the lock and wait for the
			 * consumer before retrying. */
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				VCHIQ_STATUS_T status;

				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (down_interruptible(&user_service->remove_event)
				!= 0) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback interrupted");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback closing");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		up(&user_service->insert_event);

		/* The header now lives in msg_queue; don't also queue it as
		 * a completion payload. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}

/****************************************************************************
*
*   user_service_free
*
***************************************************************************/

/* Destructor passed to vchiq_add_service_internal; releases the
 * USER_SERVICE_T allocated in VCHIQ_IOC_CREATE_SERVICE. */
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}

/****************************************************************************
*
*   close_delivered
*
***************************************************************************/

/* Acknowledge delivery of a SERVICE_CLOSED completion: drops the extra
 * service reference taken in add_completion and wakes any thread blocked
 * in the CLOSE_SERVICE/REMOVE_SERVICE ioctls. */
static void close_delivered(USER_SERVICE_T *user_service)
{
	vchiq_log_info(vchiq_arm_log_level,
		"close_delivered(handle=%x)",
		user_service->service->handle);

	if (user_service->close_pending) {
		/* Allow the underlying service to be culled */
		unlock_service(user_service->service);

		/* Wake the user-thread blocked in close_ or remove_service */
		up(&user_service->close_event);

		user_service->close_pending = 0;
	}
}

struct vchiq_io_copy_callback_context { 409 struct vchiq_element *current_element; 410 size_t current_element_offset; 411 unsigned long elements_to_go; 412 size_t current_offset; 413 }; 414 415 static ssize_t 416 vchiq_ioc_copy_element_data( 417 void *context, 418 void *dest, 419 size_t offset, 420 size_t maxsize) 421 { 422 long res; 423 size_t bytes_this_round; 424 struct vchiq_io_copy_callback_context *copy_context = 425 (struct vchiq_io_copy_callback_context *)context; 426 427 if (offset != copy_context->current_offset) 428 return 0; 429 430 if (!copy_context->elements_to_go) 431 return 0; 432 433 /* 434 * Complex logic here to handle the case of 0 size elements 435 * in the middle of the array of elements. 436 * 437 * Need to skip over these 0 size elements. 438 */ 439 while (1) { 440 bytes_this_round = min(copy_context->current_element->size - 441 copy_context->current_element_offset, 442 maxsize); 443 444 if (bytes_this_round) 445 break; 446 447 copy_context->elements_to_go--; 448 copy_context->current_element++; 449 copy_context->current_element_offset = 0; 450 451 if (!copy_context->elements_to_go) 452 return 0; 453 } 454 455 res = copy_from_user(dest, 456 copy_context->current_element->data + 457 copy_context->current_element_offset, 458 bytes_this_round); 459 460 if (res != 0) 461 return -EFAULT; 462 463 copy_context->current_element_offset += bytes_this_round; 464 copy_context->current_offset += bytes_this_round; 465 466 /* 467 * Check if done with current element, and if so advance to the next. 
468 */ 469 if (copy_context->current_element_offset == 470 copy_context->current_element->size) { 471 copy_context->elements_to_go--; 472 copy_context->current_element++; 473 copy_context->current_element_offset = 0; 474 } 475 476 return bytes_this_round; 477 } 478 479 /************************************************************************** 480 * 481 * vchiq_ioc_queue_message 482 * 483 **************************************************************************/ 484 static VCHIQ_STATUS_T 485 vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle, 486 struct vchiq_element *elements, 487 unsigned long count) 488 { 489 struct vchiq_io_copy_callback_context context; 490 unsigned long i; 491 size_t total_size = 0; 492 493 context.current_element = elements; 494 context.current_element_offset = 0; 495 context.elements_to_go = count; 496 context.current_offset = 0; 497 498 for (i = 0; i < count; i++) { 499 if (!elements[i].data && elements[i].size != 0) 500 return -EFAULT; 501 502 total_size += elements[i].size; 503 } 504 505 return vchiq_queue_message(handle, vchiq_ioc_copy_element_data, 506 &context, total_size); 507 } 508 509 /**************************************************************************** 510 * 511 * vchiq_ioctl 512 * 513 ***************************************************************************/ 514 static long 515 vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 516 { 517 VCHIQ_INSTANCE_T instance = file->private_data; 518 VCHIQ_STATUS_T status = VCHIQ_SUCCESS; 519 VCHIQ_SERVICE_T *service = NULL; 520 long ret = 0; 521 int i, rc; 522 523 DEBUG_INITIALISE(g_state.local) 524 525 vchiq_log_trace(vchiq_arm_log_level, 526 "vchiq_ioctl - instance %pK, cmd %s, arg %lx", 527 instance, 528 ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && 529 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ? 
530 ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg); 531 532 switch (cmd) { 533 case VCHIQ_IOC_SHUTDOWN: 534 if (!instance->connected) 535 break; 536 537 /* Remove all services */ 538 i = 0; 539 while ((service = next_service_by_instance(instance->state, 540 instance, &i)) != NULL) { 541 status = vchiq_remove_service(service->handle); 542 unlock_service(service); 543 if (status != VCHIQ_SUCCESS) 544 break; 545 } 546 service = NULL; 547 548 if (status == VCHIQ_SUCCESS) { 549 /* Wake the completion thread and ask it to exit */ 550 instance->closing = 1; 551 up(&instance->insert_event); 552 } 553 554 break; 555 556 case VCHIQ_IOC_CONNECT: 557 if (instance->connected) { 558 ret = -EINVAL; 559 break; 560 } 561 rc = mutex_lock_killable(&instance->state->mutex); 562 if (rc != 0) { 563 vchiq_log_error(vchiq_arm_log_level, 564 "vchiq: connect: could not lock mutex for " 565 "state %d: %d", 566 instance->state->id, rc); 567 ret = -EINTR; 568 break; 569 } 570 status = vchiq_connect_internal(instance->state, instance); 571 mutex_unlock(&instance->state->mutex); 572 573 if (status == VCHIQ_SUCCESS) 574 instance->connected = 1; 575 else 576 vchiq_log_error(vchiq_arm_log_level, 577 "vchiq: could not connect: %d", status); 578 break; 579 580 case VCHIQ_IOC_CREATE_SERVICE: { 581 VCHIQ_CREATE_SERVICE_T args; 582 USER_SERVICE_T *user_service = NULL; 583 void *userdata; 584 int srvstate; 585 586 if (copy_from_user 587 (&args, (const void __user *)arg, 588 sizeof(args)) != 0) { 589 ret = -EFAULT; 590 break; 591 } 592 593 user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL); 594 if (!user_service) { 595 ret = -ENOMEM; 596 break; 597 } 598 599 if (args.is_open) { 600 if (!instance->connected) { 601 ret = -ENOTCONN; 602 kfree(user_service); 603 break; 604 } 605 srvstate = VCHIQ_SRVSTATE_OPENING; 606 } else { 607 srvstate = 608 instance->connected ? 
609 VCHIQ_SRVSTATE_LISTENING : 610 VCHIQ_SRVSTATE_HIDDEN; 611 } 612 613 userdata = args.params.userdata; 614 args.params.callback = service_callback; 615 args.params.userdata = user_service; 616 service = vchiq_add_service_internal( 617 instance->state, 618 &args.params, srvstate, 619 instance, user_service_free); 620 621 if (service != NULL) { 622 user_service->service = service; 623 user_service->userdata = userdata; 624 user_service->instance = instance; 625 user_service->is_vchi = (args.is_vchi != 0); 626 user_service->dequeue_pending = 0; 627 user_service->close_pending = 0; 628 user_service->message_available_pos = 629 instance->completion_remove - 1; 630 user_service->msg_insert = 0; 631 user_service->msg_remove = 0; 632 sema_init(&user_service->insert_event, 0); 633 sema_init(&user_service->remove_event, 0); 634 sema_init(&user_service->close_event, 0); 635 636 if (args.is_open) { 637 status = vchiq_open_service_internal 638 (service, instance->pid); 639 if (status != VCHIQ_SUCCESS) { 640 vchiq_remove_service(service->handle); 641 service = NULL; 642 ret = (status == VCHIQ_RETRY) ? 643 -EINTR : -EIO; 644 break; 645 } 646 } 647 648 if (copy_to_user((void __user *) 649 &(((VCHIQ_CREATE_SERVICE_T __user *) 650 arg)->handle), 651 (const void *)&service->handle, 652 sizeof(service->handle)) != 0) { 653 ret = -EFAULT; 654 vchiq_remove_service(service->handle); 655 } 656 657 service = NULL; 658 } else { 659 ret = -EEXIST; 660 kfree(user_service); 661 } 662 } break; 663 664 case VCHIQ_IOC_CLOSE_SERVICE: { 665 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg; 666 667 service = find_service_for_instance(instance, handle); 668 if (service != NULL) { 669 USER_SERVICE_T *user_service = 670 (USER_SERVICE_T *)service->base.userdata; 671 /* close_pending is false on first entry, and when the 672 wait in vchiq_close_service has been interrupted. 
*/ 673 if (!user_service->close_pending) { 674 status = vchiq_close_service(service->handle); 675 if (status != VCHIQ_SUCCESS) 676 break; 677 } 678 679 /* close_pending is true once the underlying service 680 has been closed until the client library calls the 681 CLOSE_DELIVERED ioctl, signalling close_event. */ 682 if (user_service->close_pending && 683 down_interruptible(&user_service->close_event)) 684 status = VCHIQ_RETRY; 685 } else 686 ret = -EINVAL; 687 } break; 688 689 case VCHIQ_IOC_REMOVE_SERVICE: { 690 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg; 691 692 service = find_service_for_instance(instance, handle); 693 if (service != NULL) { 694 USER_SERVICE_T *user_service = 695 (USER_SERVICE_T *)service->base.userdata; 696 /* close_pending is false on first entry, and when the 697 wait in vchiq_close_service has been interrupted. */ 698 if (!user_service->close_pending) { 699 status = vchiq_remove_service(service->handle); 700 if (status != VCHIQ_SUCCESS) 701 break; 702 } 703 704 /* close_pending is true once the underlying service 705 has been closed until the client library calls the 706 CLOSE_DELIVERED ioctl, signalling close_event. */ 707 if (user_service->close_pending && 708 down_interruptible(&user_service->close_event)) 709 status = VCHIQ_RETRY; 710 } else 711 ret = -EINVAL; 712 } break; 713 714 case VCHIQ_IOC_USE_SERVICE: 715 case VCHIQ_IOC_RELEASE_SERVICE: { 716 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg; 717 718 service = find_service_for_instance(instance, handle); 719 if (service != NULL) { 720 status = (cmd == VCHIQ_IOC_USE_SERVICE) ? 721 vchiq_use_service_internal(service) : 722 vchiq_release_service_internal(service); 723 if (status != VCHIQ_SUCCESS) { 724 vchiq_log_error(vchiq_susp_log_level, 725 "%s: cmd %s returned error %d for " 726 "service %c%c%c%c:%03d", 727 __func__, 728 (cmd == VCHIQ_IOC_USE_SERVICE) ? 
729 "VCHIQ_IOC_USE_SERVICE" : 730 "VCHIQ_IOC_RELEASE_SERVICE", 731 status, 732 VCHIQ_FOURCC_AS_4CHARS( 733 service->base.fourcc), 734 service->client_id); 735 ret = -EINVAL; 736 } 737 } else 738 ret = -EINVAL; 739 } break; 740 741 case VCHIQ_IOC_QUEUE_MESSAGE: { 742 VCHIQ_QUEUE_MESSAGE_T args; 743 744 if (copy_from_user 745 (&args, (const void __user *)arg, 746 sizeof(args)) != 0) { 747 ret = -EFAULT; 748 break; 749 } 750 751 service = find_service_for_instance(instance, args.handle); 752 753 if ((service != NULL) && (args.count <= MAX_ELEMENTS)) { 754 /* Copy elements into kernel space */ 755 struct vchiq_element elements[MAX_ELEMENTS]; 756 757 if (copy_from_user(elements, args.elements, 758 args.count * sizeof(struct vchiq_element)) == 0) 759 status = vchiq_ioc_queue_message 760 (args.handle, 761 elements, args.count); 762 else 763 ret = -EFAULT; 764 } else { 765 ret = -EINVAL; 766 } 767 } break; 768 769 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT: 770 case VCHIQ_IOC_QUEUE_BULK_RECEIVE: { 771 VCHIQ_QUEUE_BULK_TRANSFER_T args; 772 struct bulk_waiter_node *waiter = NULL; 773 774 VCHIQ_BULK_DIR_T dir = 775 (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ? 
776 VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE; 777 778 if (copy_from_user 779 (&args, (const void __user *)arg, 780 sizeof(args)) != 0) { 781 ret = -EFAULT; 782 break; 783 } 784 785 service = find_service_for_instance(instance, args.handle); 786 if (!service) { 787 ret = -EINVAL; 788 break; 789 } 790 791 if (args.mode == VCHIQ_BULK_MODE_BLOCKING) { 792 waiter = kzalloc(sizeof(struct bulk_waiter_node), 793 GFP_KERNEL); 794 if (!waiter) { 795 ret = -ENOMEM; 796 break; 797 } 798 args.userdata = &waiter->bulk_waiter; 799 } else if (args.mode == VCHIQ_BULK_MODE_WAITING) { 800 struct list_head *pos; 801 802 mutex_lock(&instance->bulk_waiter_list_mutex); 803 list_for_each(pos, &instance->bulk_waiter_list) { 804 if (list_entry(pos, struct bulk_waiter_node, 805 list)->pid == current->pid) { 806 waiter = list_entry(pos, 807 struct bulk_waiter_node, 808 list); 809 list_del(pos); 810 break; 811 } 812 813 } 814 mutex_unlock(&instance->bulk_waiter_list_mutex); 815 if (!waiter) { 816 vchiq_log_error(vchiq_arm_log_level, 817 "no bulk_waiter found for pid %d", 818 current->pid); 819 ret = -ESRCH; 820 break; 821 } 822 vchiq_log_info(vchiq_arm_log_level, 823 "found bulk_waiter %pK for pid %d", waiter, 824 current->pid); 825 args.userdata = &waiter->bulk_waiter; 826 } 827 status = vchiq_bulk_transfer 828 (args.handle, 829 VCHI_MEM_HANDLE_INVALID, 830 args.data, args.size, 831 args.userdata, args.mode, 832 dir); 833 if (!waiter) 834 break; 835 if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || 836 !waiter->bulk_waiter.bulk) { 837 if (waiter->bulk_waiter.bulk) { 838 /* Cancel the signal when the transfer 839 ** completes. 
*/ 840 spin_lock(&bulk_waiter_spinlock); 841 waiter->bulk_waiter.bulk->userdata = NULL; 842 spin_unlock(&bulk_waiter_spinlock); 843 } 844 kfree(waiter); 845 } else { 846 const VCHIQ_BULK_MODE_T mode_waiting = 847 VCHIQ_BULK_MODE_WAITING; 848 waiter->pid = current->pid; 849 mutex_lock(&instance->bulk_waiter_list_mutex); 850 list_add(&waiter->list, &instance->bulk_waiter_list); 851 mutex_unlock(&instance->bulk_waiter_list_mutex); 852 vchiq_log_info(vchiq_arm_log_level, 853 "saved bulk_waiter %pK for pid %d", 854 waiter, current->pid); 855 856 if (copy_to_user((void __user *) 857 &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *) 858 arg)->mode), 859 (const void *)&mode_waiting, 860 sizeof(mode_waiting)) != 0) 861 ret = -EFAULT; 862 } 863 } break; 864 865 case VCHIQ_IOC_AWAIT_COMPLETION: { 866 VCHIQ_AWAIT_COMPLETION_T args; 867 868 DEBUG_TRACE(AWAIT_COMPLETION_LINE); 869 if (!instance->connected) { 870 ret = -ENOTCONN; 871 break; 872 } 873 874 if (copy_from_user(&args, (const void __user *)arg, 875 sizeof(args)) != 0) { 876 ret = -EFAULT; 877 break; 878 } 879 880 mutex_lock(&instance->completion_mutex); 881 882 DEBUG_TRACE(AWAIT_COMPLETION_LINE); 883 while ((instance->completion_remove == 884 instance->completion_insert) 885 && !instance->closing) { 886 int rc; 887 888 DEBUG_TRACE(AWAIT_COMPLETION_LINE); 889 mutex_unlock(&instance->completion_mutex); 890 rc = down_interruptible(&instance->insert_event); 891 mutex_lock(&instance->completion_mutex); 892 if (rc != 0) { 893 DEBUG_TRACE(AWAIT_COMPLETION_LINE); 894 vchiq_log_info(vchiq_arm_log_level, 895 "AWAIT_COMPLETION interrupted"); 896 ret = -EINTR; 897 break; 898 } 899 } 900 DEBUG_TRACE(AWAIT_COMPLETION_LINE); 901 902 if (ret == 0) { 903 int msgbufcount = args.msgbufcount; 904 int remove = instance->completion_remove; 905 906 for (ret = 0; ret < args.count; ret++) { 907 VCHIQ_COMPLETION_DATA_T *completion; 908 VCHIQ_SERVICE_T *service; 909 USER_SERVICE_T *user_service; 910 VCHIQ_HEADER_T *header; 911 912 if (remove == 
instance->completion_insert) 913 break; 914 915 completion = &instance->completions[ 916 remove & (MAX_COMPLETIONS - 1)]; 917 918 /* 919 * A read memory barrier is needed to stop 920 * prefetch of a stale completion record 921 */ 922 rmb(); 923 924 service = completion->service_userdata; 925 user_service = service->base.userdata; 926 completion->service_userdata = 927 user_service->userdata; 928 929 header = completion->header; 930 if (header) { 931 void __user *msgbuf; 932 int msglen; 933 934 msglen = header->size + 935 sizeof(VCHIQ_HEADER_T); 936 /* This must be a VCHIQ-style service */ 937 if (args.msgbufsize < msglen) { 938 vchiq_log_error( 939 vchiq_arm_log_level, 940 "header %pK: msgbufsize %x < msglen %x", 941 header, args.msgbufsize, 942 msglen); 943 WARN(1, "invalid message " 944 "size\n"); 945 if (ret == 0) 946 ret = -EMSGSIZE; 947 break; 948 } 949 if (msgbufcount <= 0) 950 /* Stall here for lack of a 951 ** buffer for the message. */ 952 break; 953 /* Get the pointer from user space */ 954 msgbufcount--; 955 if (copy_from_user(&msgbuf, 956 (const void __user *) 957 &args.msgbufs[msgbufcount], 958 sizeof(msgbuf)) != 0) { 959 if (ret == 0) 960 ret = -EFAULT; 961 break; 962 } 963 964 /* Copy the message to user space */ 965 if (copy_to_user(msgbuf, header, 966 msglen) != 0) { 967 if (ret == 0) 968 ret = -EFAULT; 969 break; 970 } 971 972 /* Now it has been copied, the message 973 ** can be released. */ 974 vchiq_release_message(service->handle, 975 header); 976 977 /* The completion must point to the 978 ** msgbuf. 
*/ 979 completion->header = msgbuf; 980 } 981 982 if ((completion->reason == 983 VCHIQ_SERVICE_CLOSED) && 984 !instance->use_close_delivered) 985 unlock_service(service); 986 987 if (copy_to_user((void __user *)( 988 (size_t)args.buf + 989 ret * sizeof(VCHIQ_COMPLETION_DATA_T)), 990 completion, 991 sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) { 992 if (ret == 0) 993 ret = -EFAULT; 994 break; 995 } 996 997 /* 998 * Ensure that the above copy has completed 999 * before advancing the remove pointer. 1000 */ 1001 mb(); 1002 remove++; 1003 instance->completion_remove = remove; 1004 } 1005 1006 if (msgbufcount != args.msgbufcount) { 1007 if (copy_to_user((void __user *) 1008 &((VCHIQ_AWAIT_COMPLETION_T *)arg)-> 1009 msgbufcount, 1010 &msgbufcount, 1011 sizeof(msgbufcount)) != 0) { 1012 ret = -EFAULT; 1013 } 1014 } 1015 } 1016 1017 if (ret != 0) 1018 up(&instance->remove_event); 1019 mutex_unlock(&instance->completion_mutex); 1020 DEBUG_TRACE(AWAIT_COMPLETION_LINE); 1021 } break; 1022 1023 case VCHIQ_IOC_DEQUEUE_MESSAGE: { 1024 VCHIQ_DEQUEUE_MESSAGE_T args; 1025 USER_SERVICE_T *user_service; 1026 VCHIQ_HEADER_T *header; 1027 1028 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); 1029 if (copy_from_user 1030 (&args, (const void __user *)arg, 1031 sizeof(args)) != 0) { 1032 ret = -EFAULT; 1033 break; 1034 } 1035 service = find_service_for_instance(instance, args.handle); 1036 if (!service) { 1037 ret = -EINVAL; 1038 break; 1039 } 1040 user_service = (USER_SERVICE_T *)service->base.userdata; 1041 if (user_service->is_vchi == 0) { 1042 ret = -EINVAL; 1043 break; 1044 } 1045 1046 spin_lock(&msg_queue_spinlock); 1047 if (user_service->msg_remove == user_service->msg_insert) { 1048 if (!args.blocking) { 1049 spin_unlock(&msg_queue_spinlock); 1050 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); 1051 ret = -EWOULDBLOCK; 1052 break; 1053 } 1054 user_service->dequeue_pending = 1; 1055 do { 1056 spin_unlock(&msg_queue_spinlock); 1057 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); 1058 if (down_interruptible( 1059 
&user_service->insert_event) != 0) { 1060 vchiq_log_info(vchiq_arm_log_level, 1061 "DEQUEUE_MESSAGE interrupted"); 1062 ret = -EINTR; 1063 break; 1064 } 1065 spin_lock(&msg_queue_spinlock); 1066 } while (user_service->msg_remove == 1067 user_service->msg_insert); 1068 1069 if (ret) 1070 break; 1071 } 1072 1073 BUG_ON((int)(user_service->msg_insert - 1074 user_service->msg_remove) < 0); 1075 1076 header = user_service->msg_queue[user_service->msg_remove & 1077 (MSG_QUEUE_SIZE - 1)]; 1078 user_service->msg_remove++; 1079 spin_unlock(&msg_queue_spinlock); 1080 1081 up(&user_service->remove_event); 1082 if (header == NULL) 1083 ret = -ENOTCONN; 1084 else if (header->size <= args.bufsize) { 1085 /* Copy to user space if msgbuf is not NULL */ 1086 if ((args.buf == NULL) || 1087 (copy_to_user((void __user *)args.buf, 1088 header->data, 1089 header->size) == 0)) { 1090 ret = header->size; 1091 vchiq_release_message( 1092 service->handle, 1093 header); 1094 } else 1095 ret = -EFAULT; 1096 } else { 1097 vchiq_log_error(vchiq_arm_log_level, 1098 "header %pK: bufsize %x < size %x", 1099 header, args.bufsize, header->size); 1100 WARN(1, "invalid size\n"); 1101 ret = -EMSGSIZE; 1102 } 1103 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); 1104 } break; 1105 1106 case VCHIQ_IOC_GET_CLIENT_ID: { 1107 VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg; 1108 1109 ret = vchiq_get_client_id(handle); 1110 } break; 1111 1112 case VCHIQ_IOC_GET_CONFIG: { 1113 VCHIQ_GET_CONFIG_T args; 1114 VCHIQ_CONFIG_T config; 1115 1116 if (copy_from_user(&args, (const void __user *)arg, 1117 sizeof(args)) != 0) { 1118 ret = -EFAULT; 1119 break; 1120 } 1121 if (args.config_size > sizeof(config)) { 1122 ret = -EINVAL; 1123 break; 1124 } 1125 status = vchiq_get_config(instance, args.config_size, &config); 1126 if (status == VCHIQ_SUCCESS) { 1127 if (copy_to_user((void __user *)args.pconfig, 1128 &config, args.config_size) != 0) { 1129 ret = -EFAULT; 1130 break; 1131 } 1132 } 1133 } break; 1134 1135 case 
VCHIQ_IOC_SET_SERVICE_OPTION: {
	VCHIQ_SET_SERVICE_OPTION_T args;

	if (copy_from_user(
		&args, (const void __user *)arg,
		sizeof(args)) != 0) {
		ret = -EFAULT;
		break;
	}

	service = find_service_for_instance(instance, args.handle);
	if (!service) {
		ret = -EINVAL;
		break;
	}

	status = vchiq_set_service_option(
			args.handle, args.option, args.value);
} break;

/* Record the library's ABI version; newer libraries opt in to
 * explicit close-delivered acknowledgement */
case VCHIQ_IOC_LIB_VERSION: {
	unsigned int lib_version = (unsigned int)arg;

	if (lib_version < VCHIQ_VERSION_MIN)
		ret = -EINVAL;
	else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
		instance->use_close_delivered = 1;
} break;

/* Userspace acknowledges a SERVICE_CLOSED completion, allowing the
 * final service reference to be dropped */
case VCHIQ_IOC_CLOSE_DELIVERED: {
	VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

	service = find_closed_service_for_instance(instance, handle);
	if (service != NULL) {
		USER_SERVICE_T *user_service =
			(USER_SERVICE_T *)service->base.userdata;
		close_delivered(user_service);
	} else
		ret = -EINVAL;
} break;

default:
	ret = -ENOTTY;
	break;
	}

	if (service)
		unlock_service(service);

	/* Map the VCHIQ status onto an errno if no error was set above */
	if (ret == 0) {
		if (status == VCHIQ_ERROR)
			ret = -EIO;
		else if (status == VCHIQ_RETRY)
			ret = -EINTR;
	}

	/* Log unexpected failures at info level, everything else as trace */
	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
		(ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			"  ioctl instance %lx, cmd %s -> status %d, %ld",
			(unsigned long)instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
			"  ioctl instance %lx, cmd %s -> status %d, %ld",
			(unsigned long)instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);

	return ret;
}

#if defined(CONFIG_COMPAT)

/* 32-bit layout of VCHIQ_SERVICE_PARAMS_T for compat ioctls */
struct vchiq_service_params32 {
	int fourcc;
	compat_uptr_t callback;
	compat_uptr_t userdata;
	short version;       /* Increment for non-trivial changes */
	short version_min;   /* Update for incompatible changes */
};

struct vchiq_create_service32 {
	struct vchiq_service_params32 params;
	int is_open;
	int is_vchi;
	unsigned int handle; /* OUT */
};

#define VCHIQ_IOC_CREATE_SERVICE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)

/*
 * 32-bit compat wrapper for VCHIQ_IOC_CREATE_SERVICE: widens the 32-bit
 * argument struct into a native one in compat-allocated user space,
 * calls the native ioctl, then copies the OUT handle back.
 */
static long
vchiq_compat_ioctl_create_service(
	struct file *file,
	unsigned int cmd,
	unsigned long arg)
{
	VCHIQ_CREATE_SERVICE_T __user *args;
	struct vchiq_create_service32 __user *ptrargs32 =
		(struct vchiq_create_service32 __user *)arg;
	struct vchiq_create_service32 args32;
	long ret;

	args = compat_alloc_user_space(sizeof(*args));
	if (!args)
		return -EFAULT;

	if (copy_from_user(&args32,
			   (struct vchiq_create_service32 __user *)arg,
			   sizeof(args32)))
		return -EFAULT;

	/* Widen each field individually; pointers go through compat_ptr() */
	if (put_user(args32.params.fourcc, &args->params.fourcc) ||
	    put_user(compat_ptr(args32.params.callback),
		     &args->params.callback) ||
	    put_user(compat_ptr(args32.params.userdata),
		     &args->params.userdata) ||
	    put_user(args32.params.version, &args->params.version) ||
	    put_user(args32.params.version_min,
		     &args->params.version_min) ||
	    put_user(args32.is_open, &args->is_open) ||
	    put_user(args32.is_vchi, &args->is_vchi) ||
	    put_user(args32.handle, &args->handle))
		return -EFAULT;

	ret = vchiq_ioctl(file, VCHIQ_IOC_CREATE_SERVICE, (unsigned long)args);

	if (ret < 0)
		return ret;

	/* Propagate the newly created handle back to the 32-bit caller */
	if (get_user(args32.handle, &args->handle))
		return -EFAULT;

	if
	   (copy_to_user(&ptrargs32->handle,
			 &args32.handle,
			 sizeof(args32.handle)))
		return -EFAULT;

	return 0;
}

/* 32-bit layout of struct vchiq_element */
struct vchiq_element32 {
	compat_uptr_t data;
	unsigned int size;
};

struct vchiq_queue_message32 {
	unsigned int handle;
	unsigned int count;
	compat_uptr_t elements;
};

#define VCHIQ_IOC_QUEUE_MESSAGE32 \
	_IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message32)

/*
 * 32-bit compat wrapper for VCHIQ_IOC_QUEUE_MESSAGE: widens the argument
 * struct and its (up to MAX_ELEMENTS) element array into compat-allocated
 * user space, then forwards to the native ioctl.
 */
static long
vchiq_compat_ioctl_queue_message(struct file *file,
				 unsigned int cmd,
				 unsigned long arg)
{
	VCHIQ_QUEUE_MESSAGE_T *args;
	struct vchiq_element *elements;
	struct vchiq_queue_message32 args32;
	unsigned int count;

	if (copy_from_user(&args32,
			   (struct vchiq_queue_message32 __user *)arg,
			   sizeof(args32)))
		return -EFAULT;

	/* Reserve room for the widened struct plus a full element array
	 * immediately after it */
	args = compat_alloc_user_space(sizeof(*args) +
				       (sizeof(*elements) * MAX_ELEMENTS));

	if (!args)
		return -EFAULT;

	if (put_user(args32.handle, &args->handle) ||
	    put_user(args32.count, &args->count) ||
	    put_user(compat_ptr(args32.elements), &args->elements))
		return -EFAULT;

	/* Bound the element count before touching the element array */
	if (args32.count > MAX_ELEMENTS)
		return -EINVAL;

	if (args32.elements && args32.count) {
		struct vchiq_element32 tempelement32[MAX_ELEMENTS];

		/* The widened element array lives just past *args */
		elements = (struct vchiq_element __user *)(args + 1);

		if (copy_from_user(&tempelement32,
				   compat_ptr(args32.elements),
				   sizeof(tempelement32)))
			return -EFAULT;

		for (count = 0; count < args32.count; count++) {
			if (put_user(compat_ptr(tempelement32[count].data),
				     &elements[count].data) ||
			    put_user(tempelement32[count].size,
				     &elements[count].size))
				return -EFAULT;
		}

		/* Point the native struct at the widened element array */
		if (put_user(elements, &args->elements))
			return -EFAULT;
	}

	return vchiq_ioctl(file, VCHIQ_IOC_QUEUE_MESSAGE, (unsigned long)args);
}

struct
       vchiq_queue_bulk_transfer32 {
	unsigned int handle;
	compat_uptr_t data;
	unsigned int size;
	compat_uptr_t userdata;
	VCHIQ_BULK_MODE_T mode;
};

#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
	_IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)

/*
 * 32-bit compat wrapper for the bulk transmit/receive ioctls: widens the
 * argument struct, maps the 32-bit command onto the native one, calls the
 * native ioctl and copies the (possibly updated) mode field back.
 */
static long
vchiq_compat_ioctl_queue_bulk(struct file *file,
			      unsigned int cmd,
			      unsigned long arg)
{
	VCHIQ_QUEUE_BULK_TRANSFER_T *args;
	struct vchiq_queue_bulk_transfer32 args32;
	/* NOTE(review): this cast lacks the __user annotation (compare
	 * ptrargs32 in the create_service wrapper) - a sparse warning,
	 * not a behavioural problem */
	struct vchiq_queue_bulk_transfer32 *ptrargs32 =
		(struct vchiq_queue_bulk_transfer32 *)arg;
	long ret;

	args = compat_alloc_user_space(sizeof(*args));
	if (!args)
		return -EFAULT;

	if (copy_from_user(&args32,
			   (struct vchiq_queue_bulk_transfer32 __user *)arg,
			   sizeof(args32)))
		return -EFAULT;

	if (put_user(args32.handle, &args->handle) ||
	    put_user(compat_ptr(args32.data), &args->data) ||
	    put_user(args32.size, &args->size) ||
	    put_user(compat_ptr(args32.userdata), &args->userdata) ||
	    put_user(args32.mode, &args->mode))
		return -EFAULT;

	/* Translate the compat command number to the native equivalent */
	if (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32)
		cmd = VCHIQ_IOC_QUEUE_BULK_TRANSMIT;
	else
		cmd = VCHIQ_IOC_QUEUE_BULK_RECEIVE;

	ret = vchiq_ioctl(file, cmd, (unsigned long)args);

	if (ret < 0)
		return ret;

	/* The native ioctl may rewrite mode (e.g. for waiting modes) -
	 * reflect that back to the 32-bit caller */
	if (get_user(args32.mode, &args->mode))
		return -EFAULT;

	if (copy_to_user(&ptrargs32->mode,
			 &args32.mode,
			 sizeof(args32.mode)))
		return -EFAULT;

	return 0;
}

/* 32-bit layout of VCHIQ_COMPLETION_DATA_T */
struct vchiq_completion_data32 {
	VCHIQ_REASON_T reason;
	compat_uptr_t header;
	compat_uptr_t service_userdata;
	compat_uptr_t bulk_userdata;
};

struct vchiq_await_completion32 {
	unsigned int count;
	compat_uptr_t buf;
	unsigned int
		     msgbufsize;
	unsigned int msgbufcount; /* IN/OUT */
	compat_uptr_t msgbufs;
};

#define VCHIQ_IOC_AWAIT_COMPLETION32 \
	_IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)

/*
 * 32-bit compat wrapper for VCHIQ_IOC_AWAIT_COMPLETION.  The native ioctl
 * fills an array of completions asynchronously, which cannot be widened
 * after the fact, so this wrapper clamps the request to a single
 * completion per call (see the long comment below) and widens that one
 * result back into the caller's 32-bit buffer.
 */
static long
vchiq_compat_ioctl_await_completion(struct file *file,
				    unsigned int cmd,
				    unsigned long arg)
{
	VCHIQ_AWAIT_COMPLETION_T *args;
	VCHIQ_COMPLETION_DATA_T *completion;
	VCHIQ_COMPLETION_DATA_T completiontemp;
	struct vchiq_await_completion32 args32;
	struct vchiq_completion_data32 completion32;
	unsigned int *msgbufcount32;
	compat_uptr_t msgbuf32;
	void *msgbuf;
	void **msgbufptr;
	long ret;

	/* One native args struct, one completion slot and one message
	 * buffer pointer, laid out contiguously in compat user space */
	args = compat_alloc_user_space(sizeof(*args) +
				       sizeof(*completion) +
				       sizeof(*msgbufptr));
	if (!args)
		return -EFAULT;

	completion = (VCHIQ_COMPLETION_DATA_T *)(args + 1);
	msgbufptr = (void __user **)(completion + 1);

	/* NOTE(review): the cast below names the wrong struct type
	 * (vchiq_completion_data32 instead of vchiq_await_completion32);
	 * harmless since sizeof(args32) governs the copy, but it should
	 * be corrected for clarity */
	if (copy_from_user(&args32,
			   (struct vchiq_completion_data32 *)arg,
			   sizeof(args32)))
		return -EFAULT;

	if (put_user(args32.count, &args->count) ||
	    put_user(compat_ptr(args32.buf), &args->buf) ||
	    put_user(args32.msgbufsize, &args->msgbufsize) ||
	    put_user(args32.msgbufcount, &args->msgbufcount) ||
	    put_user(compat_ptr(args32.msgbufs), &args->msgbufs))
		return -EFAULT;

	/* These are simple cases, so just fall into the native handler */
	if (!args32.count || !args32.buf || !args32.msgbufcount)
		return vchiq_ioctl(file,
				   VCHIQ_IOC_AWAIT_COMPLETION,
				   (unsigned long)args);

	/*
	 * These are the more complex cases.  Typical applications of this
	 * ioctl will use a very large count, with a very large msgbufcount.
	 * Since the native ioctl can asynchronously fill in the returned
	 * buffers and the application can in theory begin processing messages
	 * even before the ioctl returns, a bit of a trick is used here.
	 *
	 * By forcing both count and msgbufcount to be 1, it forces the native
	 * ioctl to only claim at most 1 message is available.   This tricks
	 * the calling application into thinking only 1 message was actually
	 * available in the queue so like all good applications it will retry
	 * waiting until all the required messages are received.
	 *
	 * This trick has been tested and proven to work with vchiq_test,
	 * Minecraft_PI, the "hello pi" examples, and various other
	 * applications that are included in Raspbian.
	 */
	/* Borrow the *last* message buffer the caller supplied - the native
	 * ioctl consumes buffers from the end of the array */
	if (copy_from_user(&msgbuf32,
			   compat_ptr(args32.msgbufs) +
			   (sizeof(compat_uptr_t) *
			   (args32.msgbufcount - 1)),
			   sizeof(msgbuf32)))
		return -EFAULT;

	msgbuf = compat_ptr(msgbuf32);

	if (copy_to_user(msgbufptr,
			 &msgbuf,
			 sizeof(msgbuf)))
		return -EFAULT;

	if (copy_to_user(&args->msgbufs,
			 &msgbufptr,
			 sizeof(msgbufptr)))
		return -EFAULT;

	/* Clamp the request to a single completion and a single buffer */
	if (put_user(1U, &args->count) ||
	    put_user(completion, &args->buf) ||
	    put_user(1U, &args->msgbufcount))
		return -EFAULT;

	ret = vchiq_ioctl(file,
			  VCHIQ_IOC_AWAIT_COMPLETION,
			  (unsigned long)args);

	/*
	 * An return value of 0 here means that no messages where available
	 * in the message queue.  In this case the native ioctl does not
	 * return any data to the application at all.  Not even to update
	 * msgbufcount.  This functionality needs to be kept here for
	 * compatibility.
	 *
	 * Of course, < 0 means that an error occurred and no data is being
	 * returned.
	 *
	 * Since count and msgbufcount was forced to 1, that means
	 * the only other possible return value is 1. Meaning that 1 message
	 * was available, so that multiple message case does not need to be
	 * handled here.
	 */
	if (ret <= 0)
		return ret;

	/* Narrow the single completion back into the 32-bit layout */
	if (copy_from_user(&completiontemp, completion, sizeof(*completion)))
		return -EFAULT;

	completion32.reason = completiontemp.reason;
	completion32.header = ptr_to_compat(completiontemp.header);
	completion32.service_userdata =
		ptr_to_compat(completiontemp.service_userdata);
	completion32.bulk_userdata =
		ptr_to_compat(completiontemp.bulk_userdata);

	if (copy_to_user(compat_ptr(args32.buf),
			 &completion32,
			 sizeof(completion32)))
		return -EFAULT;

	/* One message buffer was consumed */
	args32.msgbufcount--;

	msgbufcount32 =
		&((struct vchiq_await_completion32 __user *)arg)->msgbufcount;

	if (copy_to_user(msgbufcount32,
			 &args32.msgbufcount,
			 sizeof(args32.msgbufcount)))
		return -EFAULT;

	return 1;
}

/* 32-bit layout of VCHIQ_DEQUEUE_MESSAGE_T */
struct vchiq_dequeue_message32 {
	unsigned int handle;
	int blocking;
	unsigned int bufsize;
	compat_uptr_t buf;
};

#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)

/* 32-bit compat wrapper for VCHIQ_IOC_DEQUEUE_MESSAGE: pure widening,
 * no OUT fields to copy back (the result is the return value). */
static long
vchiq_compat_ioctl_dequeue_message(struct file *file,
				   unsigned int cmd,
				   unsigned long arg)
{
	VCHIQ_DEQUEUE_MESSAGE_T *args;
	struct vchiq_dequeue_message32 args32;

	args = compat_alloc_user_space(sizeof(*args));
	if (!args)
		return -EFAULT;

	if (copy_from_user(&args32,
			   (struct vchiq_dequeue_message32 *)arg,
			   sizeof(args32)))
		return -EFAULT;

	if (put_user(args32.handle, &args->handle) ||
	    put_user(args32.blocking, &args->blocking) ||
	    put_user(args32.bufsize, &args->bufsize) ||
	    put_user(compat_ptr(args32.buf), &args->buf))
		return -EFAULT;

	return vchiq_ioctl(file, VCHIQ_IOC_DEQUEUE_MESSAGE,
			   (unsigned long)args);
}

/* 32-bit layout of VCHIQ_GET_CONFIG_T */
struct vchiq_get_config32 {
	unsigned int config_size;
	compat_uptr_t pconfig;
};

#define VCHIQ_IOC_GET_CONFIG32 \
	_IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)

/* 32-bit compat wrapper for VCHIQ_IOC_GET_CONFIG: widen the request;
 * the native handler writes the result through pconfig directly. */
static long
vchiq_compat_ioctl_get_config(struct file *file,
			      unsigned int cmd,
			      unsigned long arg)
{
	VCHIQ_GET_CONFIG_T *args;
	struct vchiq_get_config32 args32;

	args = compat_alloc_user_space(sizeof(*args));
	if (!args)
		return -EFAULT;

	if (copy_from_user(&args32,
			   (struct vchiq_get_config32 *)arg,
			   sizeof(args32)))
		return -EFAULT;

	if (put_user(args32.config_size, &args->config_size) ||
	    put_user(compat_ptr(args32.pconfig), &args->pconfig))
		return -EFAULT;

	return vchiq_ioctl(file, VCHIQ_IOC_GET_CONFIG, (unsigned long)args);
}

/*
 * Compat ioctl entry point: dispatch the commands whose argument layout
 * differs between 32-bit and native userspace to dedicated wrappers;
 * everything else is layout-compatible and goes straight through.
 */
static long
vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case VCHIQ_IOC_CREATE_SERVICE32:
		return vchiq_compat_ioctl_create_service(file, cmd, arg);
	case VCHIQ_IOC_QUEUE_MESSAGE32:
		return vchiq_compat_ioctl_queue_message(file, cmd, arg);
	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
		return vchiq_compat_ioctl_queue_bulk(file, cmd, arg);
	case VCHIQ_IOC_AWAIT_COMPLETION32:
		return vchiq_compat_ioctl_await_completion(file, cmd, arg);
	case VCHIQ_IOC_DEQUEUE_MESSAGE32:
		return vchiq_compat_ioctl_dequeue_message(file, cmd, arg);
	case VCHIQ_IOC_GET_CONFIG32:
		return vchiq_compat_ioctl_get_config(file, cmd, arg);
	default:
		return vchiq_ioctl(file, cmd, arg);
	}
}

#endif

/****************************************************************************
*
*   vchiq_open
*
***************************************************************************/

/* Open /dev/vchiq: allocate and initialise a per-open instance, stored in
 * file->private_data.  Fails with -ENOTCONN until VideoCore is up. */
static int
vchiq_open(struct inode *inode, struct file *file)
{
	int dev = iminor(inode) & 0x0f;

	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
	switch (dev) {
	case VCHIQ_MINOR: {
		int ret;
		VCHIQ_STATE_T *state = vchiq_get_state();
		VCHIQ_INSTANCE_T instance;

		if (!state) {
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq has no connection to VideoCore");
			return -ENOTCONN;
		}

		instance = kzalloc(sizeof(*instance), GFP_KERNEL);
		if (!instance)
			return -ENOMEM;

		instance->state = state;
		instance->pid = current->tgid;

		ret = vchiq_debugfs_add_instance(instance);
		if (ret != 0) {
			kfree(instance);
			return ret;
		}

		sema_init(&instance->insert_event, 0);
		sema_init(&instance->remove_event, 0);
		mutex_init(&instance->completion_mutex);
		mutex_init(&instance->bulk_waiter_list_mutex);
		INIT_LIST_HEAD(&instance->bulk_waiter_list);

		file->private_data = instance;
	} break;

	default:
		vchiq_log_error(vchiq_arm_log_level,
			"Unknown minor device: %d", dev);
		return -ENXIO;
	}

	return 0;
}

/****************************************************************************
*
*   vchiq_release
*
***************************************************************************/

/*
 * Close /dev/vchiq: terminate every service belonging to this instance,
 * drain its message and completion queues (releasing any messages still
 * queued), free outstanding bulk waiters, and free the instance.
 */
static int
vchiq_release(struct inode *inode, struct file *file)
{
	int dev = iminor(inode) & 0x0f;
	int ret = 0;

	switch (dev) {
	case VCHIQ_MINOR: {
		VCHIQ_INSTANCE_T instance = file->private_data;
		VCHIQ_STATE_T *state = vchiq_get_state();
		VCHIQ_SERVICE_T *service;
		int i;

		vchiq_log_info(vchiq_arm_log_level,
			"vchiq_release: instance=%lx",
			(unsigned long)instance);

		if (!state) {
			ret = -EPERM;
			goto out;
		}

		/* Ensure videocore is awake to allow termination. */
		vchiq_use_internal(instance->state, NULL,
				USE_TYPE_VCHIQ);

		mutex_lock(&instance->completion_mutex);

		/* Wake the completion thread and ask it to exit */
		instance->closing = 1;
		up(&instance->insert_event);

		mutex_unlock(&instance->completion_mutex);

		/* Wake the slot handler if the completion queue is full. */
		up(&instance->remove_event);

		/* Mark all services for termination... */
		i = 0;
		while ((service = next_service_by_instance(state, instance,
			&i)) != NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			/* Wake the slot handler if the msg queue is full. */
			up(&user_service->remove_event);

			vchiq_terminate_service_internal(service);
			unlock_service(service);
		}

		/* ...and wait for them to die */
		i = 0;
		while ((service = next_service_by_instance(state, instance, &i))
			!= NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			down(&service->remove_event);

			BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

			spin_lock(&msg_queue_spinlock);

			/* Release any messages still queued on the dead
			 * service, dropping the lock around each release */
			while (user_service->msg_remove !=
				user_service->msg_insert) {
				VCHIQ_HEADER_T *header = user_service->
					msg_queue[user_service->msg_remove &
					(MSG_QUEUE_SIZE - 1)];
				user_service->msg_remove++;
				spin_unlock(&msg_queue_spinlock);

				if (header)
					vchiq_release_message(
						service->handle,
						header);
				spin_lock(&msg_queue_spinlock);
			}

			spin_unlock(&msg_queue_spinlock);

			unlock_service(service);
		}

		/* Release any closed services */
		while (instance->completion_remove !=
			instance->completion_insert) {
			VCHIQ_COMPLETION_DATA_T *completion;
			VCHIQ_SERVICE_T *service;

			completion = &instance->completions[
				instance->completion_remove &
				(MAX_COMPLETIONS - 1)];
			service = completion->service_userdata;
			if
(completion->reason == VCHIQ_SERVICE_CLOSED) { 1811 USER_SERVICE_T *user_service = 1812 service->base.userdata; 1813 1814 /* Wake any blocked user-thread */ 1815 if (instance->use_close_delivered) 1816 up(&user_service->close_event); 1817 unlock_service(service); 1818 } 1819 instance->completion_remove++; 1820 } 1821 1822 /* Release the PEER service count. */ 1823 vchiq_release_internal(instance->state, NULL); 1824 1825 { 1826 struct list_head *pos, *next; 1827 1828 list_for_each_safe(pos, next, 1829 &instance->bulk_waiter_list) { 1830 struct bulk_waiter_node *waiter; 1831 1832 waiter = list_entry(pos, 1833 struct bulk_waiter_node, 1834 list); 1835 list_del(pos); 1836 vchiq_log_info(vchiq_arm_log_level, 1837 "bulk_waiter - cleaned up %pK for pid %d", 1838 waiter, waiter->pid); 1839 kfree(waiter); 1840 } 1841 } 1842 1843 vchiq_debugfs_remove_instance(instance); 1844 1845 kfree(instance); 1846 file->private_data = NULL; 1847 } break; 1848 1849 default: 1850 vchiq_log_error(vchiq_arm_log_level, 1851 "Unknown minor device: %d", dev); 1852 ret = -ENXIO; 1853 } 1854 1855 out: 1856 return ret; 1857 } 1858 1859 /**************************************************************************** 1860 * 1861 * vchiq_dump 1862 * 1863 ***************************************************************************/ 1864 1865 void 1866 vchiq_dump(void *dump_context, const char *str, int len) 1867 { 1868 DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context; 1869 1870 if (context->actual < context->space) { 1871 int copy_bytes; 1872 1873 if (context->offset > 0) { 1874 int skip_bytes = min(len, (int)context->offset); 1875 1876 str += skip_bytes; 1877 len -= skip_bytes; 1878 context->offset -= skip_bytes; 1879 if (context->offset > 0) 1880 return; 1881 } 1882 copy_bytes = min(len, (int)(context->space - context->actual)); 1883 if (copy_bytes == 0) 1884 return; 1885 if (copy_to_user(context->buf + context->actual, str, 1886 copy_bytes)) 1887 context->actual = -EFAULT; 1888 context->actual 
+= copy_bytes; 1889 len -= copy_bytes; 1890 1891 /* If tne terminating NUL is included in the length, then it 1892 ** marks the end of a line and should be replaced with a 1893 ** carriage return. */ 1894 if ((len == 0) && (str[copy_bytes - 1] == '\0')) { 1895 char cr = '\n'; 1896 1897 if (copy_to_user(context->buf + context->actual - 1, 1898 &cr, 1)) 1899 context->actual = -EFAULT; 1900 } 1901 } 1902 } 1903 1904 /**************************************************************************** 1905 * 1906 * vchiq_dump_platform_instance_state 1907 * 1908 ***************************************************************************/ 1909 1910 void 1911 vchiq_dump_platform_instances(void *dump_context) 1912 { 1913 VCHIQ_STATE_T *state = vchiq_get_state(); 1914 char buf[80]; 1915 int len; 1916 int i; 1917 1918 /* There is no list of instances, so instead scan all services, 1919 marking those that have been dumped. */ 1920 1921 for (i = 0; i < state->unused_service; i++) { 1922 VCHIQ_SERVICE_T *service = state->services[i]; 1923 VCHIQ_INSTANCE_T instance; 1924 1925 if (service && (service->base.callback == service_callback)) { 1926 instance = service->instance; 1927 if (instance) 1928 instance->mark = 0; 1929 } 1930 } 1931 1932 for (i = 0; i < state->unused_service; i++) { 1933 VCHIQ_SERVICE_T *service = state->services[i]; 1934 VCHIQ_INSTANCE_T instance; 1935 1936 if (service && (service->base.callback == service_callback)) { 1937 instance = service->instance; 1938 if (instance && !instance->mark) { 1939 len = snprintf(buf, sizeof(buf), 1940 "Instance %pK: pid %d,%s completions %d/%d", 1941 instance, instance->pid, 1942 instance->connected ? 
" connected, " : 1943 "", 1944 instance->completion_insert - 1945 instance->completion_remove, 1946 MAX_COMPLETIONS); 1947 1948 vchiq_dump(dump_context, buf, len + 1); 1949 1950 instance->mark = 1; 1951 } 1952 } 1953 } 1954 } 1955 1956 /**************************************************************************** 1957 * 1958 * vchiq_dump_platform_service_state 1959 * 1960 ***************************************************************************/ 1961 1962 void 1963 vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service) 1964 { 1965 USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata; 1966 char buf[80]; 1967 int len; 1968 1969 len = snprintf(buf, sizeof(buf), " instance %pK", service->instance); 1970 1971 if ((service->base.callback == service_callback) && 1972 user_service->is_vchi) { 1973 len += snprintf(buf + len, sizeof(buf) - len, 1974 ", %d/%d messages", 1975 user_service->msg_insert - user_service->msg_remove, 1976 MSG_QUEUE_SIZE); 1977 1978 if (user_service->dequeue_pending) 1979 len += snprintf(buf + len, sizeof(buf) - len, 1980 " (dequeue pending)"); 1981 } 1982 1983 vchiq_dump(dump_context, buf, len + 1); 1984 } 1985 1986 /**************************************************************************** 1987 * 1988 * vchiq_read 1989 * 1990 ***************************************************************************/ 1991 1992 static ssize_t 1993 vchiq_read(struct file *file, char __user *buf, 1994 size_t count, loff_t *ppos) 1995 { 1996 DUMP_CONTEXT_T context; 1997 1998 context.buf = buf; 1999 context.actual = 0; 2000 context.space = count; 2001 context.offset = *ppos; 2002 2003 vchiq_dump_state(&context, &g_state); 2004 2005 *ppos += context.actual; 2006 2007 return context.actual; 2008 } 2009 2010 VCHIQ_STATE_T * 2011 vchiq_get_state(void) 2012 { 2013 2014 if (g_state.remote == NULL) 2015 printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__); 2016 else if (g_state.remote->initialised != 1) 2017 
printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n", 2018 __func__, g_state.remote->initialised); 2019 2020 return ((g_state.remote != NULL) && 2021 (g_state.remote->initialised == 1)) ? &g_state : NULL; 2022 } 2023 2024 static const struct file_operations 2025 vchiq_fops = { 2026 .owner = THIS_MODULE, 2027 .unlocked_ioctl = vchiq_ioctl, 2028 #if defined(CONFIG_COMPAT) 2029 .compat_ioctl = vchiq_compat_ioctl, 2030 #endif 2031 .open = vchiq_open, 2032 .release = vchiq_release, 2033 .read = vchiq_read 2034 }; 2035 2036 /* 2037 * Autosuspend related functionality 2038 */ 2039 2040 int 2041 vchiq_videocore_wanted(VCHIQ_STATE_T *state) 2042 { 2043 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2044 2045 if (!arm_state) 2046 /* autosuspend not supported - always return wanted */ 2047 return 1; 2048 else if (arm_state->blocked_count) 2049 return 1; 2050 else if (!arm_state->videocore_use_count) 2051 /* usage count zero - check for override unless we're forcing */ 2052 if (arm_state->resume_blocked) 2053 return 0; 2054 else 2055 return vchiq_platform_videocore_wanted(state); 2056 else 2057 /* non-zero usage count - videocore still required */ 2058 return 1; 2059 } 2060 2061 static VCHIQ_STATUS_T 2062 vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason, 2063 VCHIQ_HEADER_T *header, 2064 VCHIQ_SERVICE_HANDLE_T service_user, 2065 void *bulk_user) 2066 { 2067 vchiq_log_error(vchiq_susp_log_level, 2068 "%s callback reason %d", __func__, reason); 2069 return 0; 2070 } 2071 2072 static int 2073 vchiq_keepalive_thread_func(void *v) 2074 { 2075 VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v; 2076 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2077 2078 VCHIQ_STATUS_T status; 2079 VCHIQ_INSTANCE_T instance; 2080 VCHIQ_SERVICE_HANDLE_T ka_handle; 2081 2082 VCHIQ_SERVICE_PARAMS_T params = { 2083 .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'), 2084 .callback = vchiq_keepalive_vchiq_callback, 2085 .version = KEEPALIVE_VER, 2086 
.version_min = KEEPALIVE_VER_MIN 2087 }; 2088 2089 status = vchiq_initialise(&instance); 2090 if (status != VCHIQ_SUCCESS) { 2091 vchiq_log_error(vchiq_susp_log_level, 2092 "%s vchiq_initialise failed %d", __func__, status); 2093 goto exit; 2094 } 2095 2096 status = vchiq_connect(instance); 2097 if (status != VCHIQ_SUCCESS) { 2098 vchiq_log_error(vchiq_susp_log_level, 2099 "%s vchiq_connect failed %d", __func__, status); 2100 goto shutdown; 2101 } 2102 2103 status = vchiq_add_service(instance, ¶ms, &ka_handle); 2104 if (status != VCHIQ_SUCCESS) { 2105 vchiq_log_error(vchiq_susp_log_level, 2106 "%s vchiq_open_service failed %d", __func__, status); 2107 goto shutdown; 2108 } 2109 2110 while (1) { 2111 long rc = 0, uc = 0; 2112 2113 if (wait_for_completion_interruptible(&arm_state->ka_evt) 2114 != 0) { 2115 vchiq_log_error(vchiq_susp_log_level, 2116 "%s interrupted", __func__); 2117 flush_signals(current); 2118 continue; 2119 } 2120 2121 /* read and clear counters. Do release_count then use_count to 2122 * prevent getting more releases than uses */ 2123 rc = atomic_xchg(&arm_state->ka_release_count, 0); 2124 uc = atomic_xchg(&arm_state->ka_use_count, 0); 2125 2126 /* Call use/release service the requisite number of times. 
		 * Process use before release so use counts don't go negative */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_use_service error %d",
					__func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_release_service error %d",
					__func__, status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}

/* Initialise the ARM-side suspend/resume bookkeeping attached to a VCHIQ
 * state: locks, keepalive counters/event, suspend & resume completions
 * (pre-completed where the idle state is 'running'), and the suspend
 * timer.  Always returns VCHIQ_SUCCESS. */
VCHIQ_STATUS_T
vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
{
	if (arm_state) {
		rwlock_init(&arm_state->susp_res_lock);

		init_completion(&arm_state->ka_evt);
		atomic_set(&arm_state->ka_use_count, 0);
		atomic_set(&arm_state->ka_use_ack_count, 0);
		atomic_set(&arm_state->ka_release_count, 0);

		init_completion(&arm_state->vc_suspend_complete);

		init_completion(&arm_state->vc_resume_complete);
		/* Initialise to 'done' state. We only want to block on resume
		 * completion while videocore is suspended. */
		set_resume_state(arm_state, VC_RESUME_RESUMED);

		init_completion(&arm_state->resume_blocker);
		/* Initialise to 'done' state. We only want to block on this
		 * completion while resume is blocked */
		complete_all(&arm_state->resume_blocker);

		init_completion(&arm_state->blocked_blocker);
		/* Initialise to 'done' state. We only want to block on this
		 * completion while things are waiting on the resume blocker */
		complete_all(&arm_state->blocked_blocker);

		arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
		arm_state->suspend_timer_running = 0;
		arm_state->state = state;
		timer_setup(&arm_state->suspend_timer, suspend_timer_callback,
			    0);

		arm_state->first_connect = 0;

	}
	return VCHIQ_SUCCESS;
}

/*
** Functions to modify the state variables;
**	set_suspend_state
**	set_resume_state
**
** There are more state variables than we might like, so ensure they remain in
** step.  Suspend and resume state are maintained separately, since most of
** these state machines can operate independently.  However, there are a few
** states where state transitions in one state machine cause a reset to the
** other state machine.  In addition, there are some completion events which
** need to occur on state machine reset and end-state(s), so these are also
** dealt with in these functions.
**
** In all states we set the state variable according to the input, but in some
** cases we perform additional steps outlined below;
**
** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
**			The suspend completion is completed after any suspend
**			attempt.  When we reset the state machine we also reset
**			the completion.  This reset occurs when videocore is
**			resumed, and also if we initiate suspend after a suspend
**			failure.
**
** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
**			suspend - ie from this point on we must try to suspend
**			before resuming can occur.  We therefore also reset the
**			resume state machine to VC_RESUME_IDLE in this state.
**
** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
**			complete_all on the suspend completion to notify
**			anything waiting for suspend to happen.
**
** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
**			initiate resume, so no need to alter resume state.
**			We call complete_all on the suspend completion to notify
**			of suspend rejection.
**
** VC_SUSPEND_FAILED - We failed to initiate videocore suspend.  We notify the
**			suspend completion and reset the resume state machine.
**
** VC_RESUME_IDLE - Initialise the resume completion at the same time.  The
**			resume completion is in it's 'done' state whenever
**			videcore is running.  Therefore, the VC_RESUME_IDLE
**			state implies that videocore is suspended.
**			Hence, any thread which needs to wait until videocore is
**			running can wait on this completion - it will only block
**			if videocore is suspended.
**
** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
**			Call complete_all on the resume completion to unblock
**			any threads waiting for resume.  Also reset the suspend
**			state machine to it's idle state.
**
** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
 */

/*
 * Enter suspend state @new_state and run that transition's side effects:
 * completions are completed/reinitialised and, for some transitions, the
 * resume state machine is reset.  See the state table in the comment above.
 * NOTE(review): callers appear to hold susp_res_lock for write around this
 * call - confirm at call sites.
 */
void
set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
	enum vc_suspend_status new_state)
{
	/* set the state in all cases */
	arm_state->vc_suspend_state = new_state;

	/* state specific additional actions */
	switch (new_state) {
	case VC_SUSPEND_FORCE_CANCELED:
		/* wake anyone waiting on the abandoned suspend attempt */
		complete_all(&arm_state->vc_suspend_complete);
		break;
	case VC_SUSPEND_REJECTED:
		complete_all(&arm_state->vc_suspend_complete);
		break;
	case VC_SUSPEND_FAILED:
		complete_all(&arm_state->vc_suspend_complete);
		/* a failed suspend means videocore is still running */
		arm_state->vc_resume_state = VC_RESUME_RESUMED;
		complete_all(&arm_state->vc_resume_complete);
		break;
	case VC_SUSPEND_IDLE:
		/* new suspend cycle: re-arm the completion */
		reinit_completion(&arm_state->vc_suspend_complete);
		break;
	case VC_SUSPEND_REQUESTED:
		break;
	case VC_SUSPEND_IN_PROGRESS:
		/* point of no return - resume must wait for this suspend */
		set_resume_state(arm_state, VC_RESUME_IDLE);
		break;
	case VC_SUSPEND_SUSPENDED:
		complete_all(&arm_state->vc_suspend_complete);
		break;
	default:
		BUG();
		break;
	}
}

/*
 * Enter resume state @new_state; see the state table in the comment above
 * for the per-state side effects on the completions and the suspend state
 * machine.
 */
void
set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
	enum vc_resume_status new_state)
{
	/* set the state in all cases */
	arm_state->vc_resume_state = new_state;

	/* state specific additional actions */
	switch (new_state) {
	case VC_RESUME_FAILED:
		break;
	case VC_RESUME_IDLE:
		/* videocore is (about to be) suspended - re-arm the
		 * completion so waiters block until resume */
		reinit_completion(&arm_state->vc_resume_complete);
		break;
	case VC_RESUME_REQUESTED:
		break;
	case VC_RESUME_IN_PROGRESS:
		break;
	case VC_RESUME_RESUMED:
		/* unblock anything waiting for videocore to run, and reset
		 * the suspend state machine */
		complete_all(&arm_state->vc_resume_complete);
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
		break;
	default:
		BUG();
		break;
	}
}

/* (Re)arm the autosuspend timer.
 * should be called with the write lock held */
inline void
start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
{
	del_timer(&arm_state->suspend_timer);
	arm_state->suspend_timer.expires = jiffies +
		msecs_to_jiffies(arm_state->suspend_timer_timeout);
	add_timer(&arm_state->suspend_timer);
	arm_state->suspend_timer_running = 1;
}

/* Cancel a pending autosuspend timer, if armed.
 * should be called with the write lock held */
static inline void
stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
{
	if (arm_state->suspend_timer_running) {
		del_timer(&arm_state->suspend_timer);
		arm_state->suspend_timer_running = 0;
	}
}

/*
 * Nonzero when videocore should be woken: a suspend has started (or
 * completed), no resume has been requested yet, and something wants
 * videocore running.
 */
static inline int
need_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
		(arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
		vchiq_videocore_wanted(state);
}

/*
 * Prepare for a forced suspend: drain previously blocked clients, wait out
 * any in-flight resume, then block further resumes via resume_blocker.
 * Called with susp_res_lock held for write; the lock is dropped and
 * re-taken around each wait, and is held again on return.
 * Returns VCHIQ_SUCCESS or VCHIQ_ERROR on timeout/interrupt.
 */
static int
block_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	int status = VCHIQ_SUCCESS;
	const unsigned long timeout_val =
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
	int resume_count = 0;

	/* Allow any threads which were blocked by the last force suspend to
	 * complete if they haven't already.  Only give this one shot; if
	 * blocked_count is incremented after blocked_blocker is completed
	 * (which only happens when blocked_count hits 0) then those threads
	 * will have to wait until next time around */
	if (arm_state->blocked_count) {
		reinit_completion(&arm_state->blocked_blocker);
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
			"blocked clients", __func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->blocked_blocker, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"previously blocked clients failed", __func__);
			status = VCHIQ_ERROR;
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
			"clients resumed", __func__);
		write_lock_bh(&arm_state->susp_res_lock);
	}

	/* We need to wait for resume to complete if it's in process */
	while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
			arm_state->vc_resume_state > VC_RESUME_IDLE) {
		/* a second timed-out wait is treated as failure */
		if (resume_count > 1) {
			status = VCHIQ_ERROR;
			vchiq_log_error(vchiq_susp_log_level, "%s waited too "
				"many times for resume", __func__);
			goto out;
		}
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
			__func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->vc_resume_complete, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"resume failed (%s)", __func__,
				resume_state_names[arm_state->vc_resume_state +
							VC_RESUME_NUM_OFFSET]);
			status = VCHIQ_ERROR;
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
		write_lock_bh(&arm_state->susp_res_lock);
		resume_count++;
	}

	/* from here on, resume is blocked until unblock_resume() */
	reinit_completion(&arm_state->resume_blocker);
	arm_state->resume_blocked = 1;

out:
	return status;
}

/* Undo block_resume(): release any waiters on resume_blocker. */
static inline void
unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	complete_all(&arm_state->resume_blocker);
	arm_state->resume_blocked = 0;
}

/* Initiate suspend via slot handler. Should be called with the write lock
 * held */
VCHIQ_STATUS_T
vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	status = VCHIQ_SUCCESS;

	switch (arm_state->vc_suspend_state) {
	case VC_SUSPEND_REQUESTED:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
			"requested", __func__);
		break;
	case VC_SUSPEND_IN_PROGRESS:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
			"progress", __func__);
		break;

	default:
		/* We don't expect to be in other states, so log but continue
		 * anyway */
		vchiq_log_error(vchiq_susp_log_level,
			"%s unexpected suspend state %s", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* fall through */
	case VC_SUSPEND_REJECTED:
	case VC_SUSPEND_FAILED:
		/* Ensure any idle state actions have been run */
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
		/* fall through */
	case VC_SUSPEND_IDLE:
		vchiq_log_info(vchiq_susp_log_level,
			"%s: suspending", __func__);
		set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
		/* kick the slot handler thread to initiate suspend */
		request_poll(state, NULL, 0);
		break;
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}

void
/*
 * Progress a requested suspend: if suspend has been requested while
 * videocore is running, move to VC_SUSPEND_IN_PROGRESS (under the write
 * lock) and then call vchiq_platform_suspend() with the lock dropped.
 */
vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int susp = 0;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
			arm_state->vc_resume_state == VC_RESUME_RESUMED) {
		set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
		susp = 1;
	}
	write_unlock_bh(&arm_state->susp_res_lock);

	/* perform the actual suspend outside the lock */
	if (susp)
		vchiq_platform_suspend(state);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
	return;
}

/*
 * Log why a forced suspend timed out: either the overall videocore use
 * count is zero, or the first busy service is named.  err[] may truncate
 * (snprintf) - acceptable for a diagnostic message.
 */
static void
output_timeout_error(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	char err[50] = "";
	int vc_use_count = arm_state->videocore_use_count;
	int active_services = state->unused_service;
	int i;

	if (!arm_state->videocore_use_count) {
		snprintf(err, sizeof(err), " Videocore usecount is 0");
		goto output_msg;
	}
	/* report the first in-use, non-free service */
	for (i = 0; i < active_services; i++) {
		VCHIQ_SERVICE_T *service_ptr = state->services[i];

		if (service_ptr && service_ptr->service_use_count &&
			(service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
			snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
				"use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
					service_ptr->base.fourcc),
				service_ptr->client_id,
				service_ptr->service_use_count,
				service_ptr->service_use_count ==
					vc_use_count ? "" : " (+ more)");
			break;
		}
	}

output_msg:
	vchiq_log_error(vchiq_susp_log_level,
		"timed out waiting for vc suspend (%d).%s",
		arm_state->autosuspend_override, err);

}

/* Try to get videocore into suspended state, regardless of autosuspend state.
** We don't actually force suspend, since videocore may get into a bad state
** if we force suspend at a bad time.  Instead, we wait for autosuspend to
** determine a good point to suspend.  If this doesn't happen within 100ms we
** report failure.
**
** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
*/
VCHIQ_STATUS_T
vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	long rc = 0;
	int repeat = -1;	/* -1: first timeout may be retried once */

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);

	/* stop anyone resuming while we attempt to suspend */
	status = block_resume(arm_state);
	if (status != VCHIQ_SUCCESS)
		goto unlock;
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		/* Already suspended - just block resume and exit */
		vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
			__func__);
		status = VCHIQ_SUCCESS;
		goto unlock;
	} else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
		/* initiate suspend immediately in the case that we're waiting
		 * for the timeout */
		stop_suspend_timer(arm_state);
		if (!vchiq_videocore_wanted(state)) {
			vchiq_log_info(vchiq_susp_log_level, "%s videocore "
				"idle, initiating suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		} else if (arm_state->autosuspend_override <
						FORCE_SUSPEND_FAIL_MAX) {
			/* let autosuspend find a good moment */
			vchiq_log_info(vchiq_susp_log_level, "%s letting "
				"videocore go idle", __func__);
			status = VCHIQ_SUCCESS;
		} else {
			/* autosuspend has failed too often - force the issue */
			vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
				"many times - attempting suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		}
	} else {
		vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
			"in progress - wait for completion", __func__);
		status = VCHIQ_SUCCESS;
	}

	/* Wait for suspend to happen due to system idle (not forced..) */
	if (status != VCHIQ_SUCCESS)
		goto unblock_resume;

	do {
		/* drop the lock while sleeping on the suspend completion */
		write_unlock_bh(&arm_state->susp_res_lock);

		rc = wait_for_completion_interruptible_timeout(
				&arm_state->vc_suspend_complete,
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));

		write_lock_bh(&arm_state->susp_res_lock);
		if (rc < 0) {
			vchiq_log_warning(vchiq_susp_log_level, "%s "
				"interrupted waiting for suspend", __func__);
			status = VCHIQ_ERROR;
			goto unblock_resume;
		} else if (rc == 0) {
			if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
				/* Repeat timeout once if in progress */
				if (repeat < 0) {
					repeat = 1;
					continue;
				}
			}
			arm_state->autosuspend_override++;
			output_timeout_error(state);

			status = VCHIQ_RETRY;
			goto unblock_resume;
		}
	} while (0 < (repeat--));

	/* Check and report state in case we need to abort ARM suspend */
	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
		status = VCHIQ_RETRY;
		vchiq_log_error(vchiq_susp_log_level,
			"%s videocore suspend failed (state %s)", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* Reset the state only if it's still in an error state.
		 * Something could have already initiated another suspend.
		 */
		if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
			set_suspend_state(arm_state, VC_SUSPEND_IDLE);

		goto unblock_resume;
	}

	/* successfully suspended - unlock and exit */
	goto unlock;

unblock_resume:
	/* all error states need to unblock resume before exit */
	unblock_resume(arm_state);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}

/*
 * Initiate suspend if videocore is connected, not already suspended, and
 * nothing wants it running.  Called e.g. from the suspend timer.
 */
void
vchiq_check_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
			arm_state->first_connect &&
			!vchiq_videocore_wanted(state)) {
		vchiq_arm_vcsuspend(state);
	}
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
	return;
}

/*
 * Allow resumes again after a forced suspend; if a resume is then needed,
 * wait for it to complete.  Returns 0 when videocore ends up running,
 * -1 when it remains suspended or the wait was interrupted.
 */
int
vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int resume = 0;
	int ret = -1;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	unblock_resume(arm_state);
	resume = vchiq_check_resume(state);
	write_unlock_bh(&arm_state->susp_res_lock);

	if (resume) {
		if (wait_for_completion_interruptible(
			&arm_state->vc_resume_complete) < 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			/* failed, cannot accurately derive suspend
			 * state, so exit early.
*/ 2700 goto out; 2701 } 2702 } 2703 2704 read_lock_bh(&arm_state->susp_res_lock); 2705 if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) { 2706 vchiq_log_info(vchiq_susp_log_level, 2707 "%s: Videocore remains suspended", __func__); 2708 } else { 2709 vchiq_log_info(vchiq_susp_log_level, 2710 "%s: Videocore resumed", __func__); 2711 ret = 0; 2712 } 2713 read_unlock_bh(&arm_state->susp_res_lock); 2714 out: 2715 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret); 2716 return ret; 2717 } 2718 2719 /* This function should be called with the write lock held */ 2720 int 2721 vchiq_check_resume(VCHIQ_STATE_T *state) 2722 { 2723 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2724 int resume = 0; 2725 2726 if (!arm_state) 2727 goto out; 2728 2729 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2730 2731 if (need_resume(state)) { 2732 set_resume_state(arm_state, VC_RESUME_REQUESTED); 2733 request_poll(state, NULL, 0); 2734 resume = 1; 2735 } 2736 2737 out: 2738 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__); 2739 return resume; 2740 } 2741 2742 VCHIQ_STATUS_T 2743 vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, 2744 enum USE_TYPE_E use_type) 2745 { 2746 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2747 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS; 2748 char entity[16]; 2749 int *entity_uc; 2750 int local_uc, local_entity_uc; 2751 2752 if (!arm_state) 2753 goto out; 2754 2755 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2756 2757 if (use_type == USE_TYPE_VCHIQ) { 2758 sprintf(entity, "VCHIQ: "); 2759 entity_uc = &arm_state->peer_use_count; 2760 } else if (service) { 2761 sprintf(entity, "%c%c%c%c:%03d", 2762 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), 2763 service->client_id); 2764 entity_uc = &service->service_use_count; 2765 } else { 2766 vchiq_log_error(vchiq_susp_log_level, "%s null service " 2767 "ptr", __func__); 2768 ret = VCHIQ_ERROR; 2769 goto out; 2770 } 2771 
	write_lock_bh(&arm_state->susp_res_lock);
	while (arm_state->resume_blocked) {
		/* If we call 'use' while force suspend is waiting for suspend,
		 * then we're about to block the thread which the force is
		 * waiting to complete, so we're bound to just time out. In this
		 * case, set the suspend state such that the wait will be
		 * canceled, so we can complete as quickly as possible. */
		if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
				VC_SUSPEND_IDLE) {
			set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
			break;
		}
		/* If suspend is already in progress then we need to block */
		if (!try_wait_for_completion(&arm_state->resume_blocker)) {
			/* Indicate that there are threads waiting on the resume
			 * blocker. These need to be allowed to complete before
			 * a _second_ call to force suspend can complete,
			 * otherwise low priority threads might never actually
			 * continue */
			arm_state->blocked_count++;
			write_unlock_bh(&arm_state->susp_res_lock);
			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
				"blocked - waiting...", __func__, entity);
			if (wait_for_completion_killable(
					&arm_state->resume_blocker) != 0) {
				vchiq_log_error(vchiq_susp_log_level, "%s %s "
					"wait for resume blocker interrupted",
					__func__, entity);
				ret = VCHIQ_ERROR;
				/* re-take the lock only to fix blocked_count */
				write_lock_bh(&arm_state->susp_res_lock);
				arm_state->blocked_count--;
				write_unlock_bh(&arm_state->susp_res_lock);
				goto out;
			}
			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
				"unblocked", __func__, entity);
			write_lock_bh(&arm_state->susp_res_lock);
			/* last blocked thread out signals block_resume() */
			if (--arm_state->blocked_count == 0)
				complete_all(&arm_state->blocked_blocker);
		}
	}

	/* videocore is now wanted - no point suspending on the timer */
	stop_suspend_timer(arm_state);

	local_uc = ++arm_state->videocore_use_count;
	local_entity_uc = ++(*entity_uc);

	/* If there's a pending request which hasn't yet been serviced then
	 * just clear it.  If we're past VC_SUSPEND_REQUESTED state then
	 * vc_resume_complete will block until we either resume or fail to
	 * suspend */
	if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);

	if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
		set_resume_state(arm_state, VC_RESUME_REQUESTED);
		vchiq_log_info(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, local_entity_uc, local_uc);
		/* kick the slot handler to perform the resume */
		request_poll(state, NULL, 0);
	} else
		vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc, local_uc);

	write_unlock_bh(&arm_state->susp_res_lock);

	/* Completion is in a done state when we're not suspended, so this won't
	 * block for the non-suspended case. */
	if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
		vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
			__func__, entity);
		if (wait_for_completion_killable(
				&arm_state->vc_resume_complete) != 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
				"resume interrupted", __func__, entity);
			ret = VCHIQ_ERROR;
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
			entity);
	}

	if (ret == VCHIQ_SUCCESS) {
		VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
		/* claim all pending keepalive acks and notify videocore once
		 * per ack; on failure, return the unsent acks to the counter */
		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);

		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
			/* Send the use notify to videocore */
			status = vchiq_send_remote_use_active(state);
			if (status == VCHIQ_SUCCESS)
				ack_cnt--;
			else
				atomic_add(ack_cnt,
					&arm_state->ka_use_ack_count);
		}
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}

VCHIQ_STATUS_T
vchiq_release_internal(VCHIQ_STATE_T
*state, VCHIQ_SERVICE_T *service) 2877 { 2878 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2879 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS; 2880 char entity[16]; 2881 int *entity_uc; 2882 int local_uc, local_entity_uc; 2883 2884 if (!arm_state) 2885 goto out; 2886 2887 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2888 2889 if (service) { 2890 sprintf(entity, "%c%c%c%c:%03d", 2891 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), 2892 service->client_id); 2893 entity_uc = &service->service_use_count; 2894 } else { 2895 sprintf(entity, "PEER: "); 2896 entity_uc = &arm_state->peer_use_count; 2897 } 2898 2899 write_lock_bh(&arm_state->susp_res_lock); 2900 if (!arm_state->videocore_use_count || !(*entity_uc)) { 2901 /* Don't use BUG_ON - don't allow user thread to crash kernel */ 2902 WARN_ON(!arm_state->videocore_use_count); 2903 WARN_ON(!(*entity_uc)); 2904 ret = VCHIQ_ERROR; 2905 goto unlock; 2906 } 2907 local_uc = --arm_state->videocore_use_count; 2908 local_entity_uc = --(*entity_uc); 2909 2910 if (!vchiq_videocore_wanted(state)) { 2911 if (vchiq_platform_use_suspend_timer() && 2912 !arm_state->resume_blocked) { 2913 /* Only use the timer if we're not trying to force 2914 * suspend (=> resume_blocked) */ 2915 start_suspend_timer(arm_state); 2916 } else { 2917 vchiq_log_info(vchiq_susp_log_level, 2918 "%s %s count %d, state count %d - suspending", 2919 __func__, entity, *entity_uc, 2920 arm_state->videocore_use_count); 2921 vchiq_arm_vcsuspend(state); 2922 } 2923 } else 2924 vchiq_log_trace(vchiq_susp_log_level, 2925 "%s %s count %d, state count %d", 2926 __func__, entity, *entity_uc, 2927 arm_state->videocore_use_count); 2928 2929 unlock: 2930 write_unlock_bh(&arm_state->susp_res_lock); 2931 2932 out: 2933 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret); 2934 return ret; 2935 } 2936 2937 void 2938 vchiq_on_remote_use(VCHIQ_STATE_T *state) 2939 { 2940 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2941 
2942 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2943 atomic_inc(&arm_state->ka_use_count); 2944 complete(&arm_state->ka_evt); 2945 } 2946 2947 void 2948 vchiq_on_remote_release(VCHIQ_STATE_T *state) 2949 { 2950 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2951 2952 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2953 atomic_inc(&arm_state->ka_release_count); 2954 complete(&arm_state->ka_evt); 2955 } 2956 2957 VCHIQ_STATUS_T 2958 vchiq_use_service_internal(VCHIQ_SERVICE_T *service) 2959 { 2960 return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE); 2961 } 2962 2963 VCHIQ_STATUS_T 2964 vchiq_release_service_internal(VCHIQ_SERVICE_T *service) 2965 { 2966 return vchiq_release_internal(service->state, service); 2967 } 2968 2969 VCHIQ_DEBUGFS_NODE_T * 2970 vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance) 2971 { 2972 return &instance->debugfs_node; 2973 } 2974 2975 int 2976 vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance) 2977 { 2978 VCHIQ_SERVICE_T *service; 2979 int use_count = 0, i; 2980 2981 i = 0; 2982 while ((service = next_service_by_instance(instance->state, 2983 instance, &i)) != NULL) { 2984 use_count += service->service_use_count; 2985 unlock_service(service); 2986 } 2987 return use_count; 2988 } 2989 2990 int 2991 vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance) 2992 { 2993 return instance->pid; 2994 } 2995 2996 int 2997 vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance) 2998 { 2999 return instance->trace; 3000 } 3001 3002 void 3003 vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace) 3004 { 3005 VCHIQ_SERVICE_T *service; 3006 int i; 3007 3008 i = 0; 3009 while ((service = next_service_by_instance(instance->state, 3010 instance, &i)) != NULL) { 3011 service->trace = trace; 3012 unlock_service(service); 3013 } 3014 instance->trace = (trace != 0); 3015 } 3016 3017 static void suspend_timer_callback(struct timer_list *t) 3018 { 3019 VCHIQ_ARM_STATE_T *arm_state = 
from_timer(arm_state, t, suspend_timer); 3020 VCHIQ_STATE_T *state = arm_state->state; 3021 3022 vchiq_log_info(vchiq_susp_log_level, 3023 "%s - suspend timer expired - check suspend", __func__); 3024 vchiq_check_suspend(state); 3025 } 3026 3027 VCHIQ_STATUS_T 3028 vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle) 3029 { 3030 VCHIQ_STATUS_T ret = VCHIQ_ERROR; 3031 VCHIQ_SERVICE_T *service = find_service_by_handle(handle); 3032 3033 if (service) { 3034 ret = vchiq_use_internal(service->state, service, 3035 USE_TYPE_SERVICE_NO_RESUME); 3036 unlock_service(service); 3037 } 3038 return ret; 3039 } 3040 3041 VCHIQ_STATUS_T 3042 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle) 3043 { 3044 VCHIQ_STATUS_T ret = VCHIQ_ERROR; 3045 VCHIQ_SERVICE_T *service = find_service_by_handle(handle); 3046 3047 if (service) { 3048 ret = vchiq_use_internal(service->state, service, 3049 USE_TYPE_SERVICE); 3050 unlock_service(service); 3051 } 3052 return ret; 3053 } 3054 3055 VCHIQ_STATUS_T 3056 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle) 3057 { 3058 VCHIQ_STATUS_T ret = VCHIQ_ERROR; 3059 VCHIQ_SERVICE_T *service = find_service_by_handle(handle); 3060 3061 if (service) { 3062 ret = vchiq_release_internal(service->state, service); 3063 unlock_service(service); 3064 } 3065 return ret; 3066 } 3067 3068 void 3069 vchiq_dump_service_use_state(VCHIQ_STATE_T *state) 3070 { 3071 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 3072 int i, j = 0; 3073 /* Only dump 64 services */ 3074 static const int local_max_services = 64; 3075 /* If there's more than 64 services, only dump ones with 3076 * non-zero counts */ 3077 int only_nonzero = 0; 3078 static const char *nz = "<-- preventing suspend"; 3079 3080 enum vc_suspend_status vc_suspend_state; 3081 enum vc_resume_status vc_resume_state; 3082 int peer_count; 3083 int vc_use_count; 3084 int active_services; 3085 struct service_data_struct { 3086 int fourcc; 3087 int clientid; 3088 int use_count; 3089 } 
		service_data[local_max_services];

	if (!arm_state)
		return;

	/* snapshot everything under the lock, log after dropping it */
	read_lock_bh(&arm_state->susp_res_lock);
	vc_suspend_state = arm_state->vc_suspend_state;
	vc_resume_state = arm_state->vc_resume_state;
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > local_max_services)
		only_nonzero = 1;

	/* copy up to local_max_services interesting services */
	for (i = 0; (i < active_services) && (j < local_max_services); i++) {
		VCHIQ_SERVICE_T *service_ptr = state->services[i];

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[j].fourcc = service_ptr->base.fourcc;
		service_data[j].clientid = service_ptr->client_id;
		service_data[j++].use_count = service_ptr->service_use_count;
	}

	read_unlock_bh(&arm_state->susp_res_lock);

	vchiq_log_warning(vchiq_susp_log_level,
		"-- Videcore suspend state: %s --",
		suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
	vchiq_log_warning(vchiq_susp_log_level,
		"-- Videcore resume state: %s --",
		resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active "
			"services (%d).  Only dumping up to first %d services "
			"with non-zero use-count", active_services,
			local_max_services);

	for (i = 0; i < j; i++) {
		vchiq_log_warning(vchiq_susp_log_level,
			"----- %c%c%c%c:%d service count %d %s",
			VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
			service_data[i].clientid,
			service_data[i].use_count,
			service_data[i].use_count ?
				nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level,
		"----- VCHIQ use count count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level,
		"--- Overall vchiq instance use count %d", vc_use_count);

	vchiq_dump_platform_use_state(state);
}

/*
 * Verify @service currently holds a use count; log and dump the use-count
 * state if not.  Returns VCHIQ_SUCCESS when the count is non-zero,
 * VCHIQ_ERROR otherwise (including NULL service/state).
 */
VCHIQ_STATUS_T
vchiq_check_service(VCHIQ_SERVICE_T *service)
{
	VCHIQ_ARM_STATE_T *arm_state;
	VCHIQ_STATUS_T ret = VCHIQ_ERROR;

	if (!service || !service->state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = VCHIQ_SUCCESS;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret == VCHIQ_ERROR) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s ERROR - %c%c%c%c:%d service count %d, "
			"state count %d, videocore suspend state %s", __func__,
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id, service->service_use_count,
			arm_state->videocore_use_count,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}

/* stub functions */
void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
{
	(void)state;
}

/*
 * Connection-state callback: on the first transition to CONNECTED, spawn
 * the per-state keepalive thread.
 */
void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
	VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
		write_lock_bh(&arm_state->susp_res_lock);
		if (!arm_state->first_connect) {
			char threadname[16];

			/* claim the one-shot flag under the lock */
			arm_state->first_connect = 1;

			write_unlock_bh(&arm_state->susp_res_lock);
			snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
				state->id);
			arm_state->ka_thread = kthread_create(
				&vchiq_keepalive_thread_func,
				(void *)state,
				threadname);
			if (IS_ERR(arm_state->ka_thread)) {
				vchiq_log_error(vchiq_susp_log_level,
					"vchiq: FATAL: couldn't create thread %s",
					threadname);
			} else {
				wake_up_process(arm_state->ka_thread);
			}
		} else
			write_unlock_bh(&arm_state->susp_res_lock);
	}
}

/*
 * Platform probe: resolve the firmware phandle, initialise the vchiq
 * platform state, then register the char device, sysfs class/device and
 * debugfs entries.  Unwinds in reverse order on failure.
 */
static int vchiq_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	struct rpi_firmware *fw;
	int err;

	fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	/* defer probing until the firmware driver is ready */
	fw = rpi_firmware_get(fw_node);
	of_node_put(fw_node);
	if (!fw)
		return -EPROBE_DEFER;

	platform_set_drvdata(pdev, fw);

	err = vchiq_platform_init(pdev, &g_state);
	if (err != 0)
		goto failed_platform_init;

	err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
	if (err != 0) {
		vchiq_log_error(vchiq_arm_log_level,
			"Unable to allocate device number");
		goto failed_platform_init;
	}
	cdev_init(&vchiq_cdev, &vchiq_fops);
	vchiq_cdev.owner = THIS_MODULE;
	err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
	if (err != 0) {
		vchiq_log_error(vchiq_arm_log_level,
			"Unable to register device");
		goto failed_cdev_add;
	}

	/* create sysfs entries */
	vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
	err = PTR_ERR(vchiq_class);
	if (IS_ERR(vchiq_class))
		goto failed_class_create;

	vchiq_dev = device_create(vchiq_class, NULL,
		vchiq_devid, NULL, "vchiq");
	err = PTR_ERR(vchiq_dev);
	if (IS_ERR(vchiq_dev))
		goto failed_device_create;

	/* create debugfs entries
	 */
	err = vchiq_debugfs_init();
	if (err != 0)
		goto failed_debugfs_init;

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq: initialised - version %d (min %d), device %d.%d",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN,
		MAJOR(vchiq_devid), MINOR(vchiq_devid));

	return 0;

/* unwind in strict reverse order of the setup above */
failed_debugfs_init:
	device_destroy(vchiq_class, vchiq_devid);
failed_device_create:
	class_destroy(vchiq_class);
failed_class_create:
	cdev_del(&vchiq_cdev);
failed_cdev_add:
	unregister_chrdev_region(vchiq_devid, 1);
failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
	return err;
}

/* Platform remove: tear down debugfs, sysfs and the char device.
 * NOTE(review): the rpi_firmware reference taken in probe is not released
 * here - confirm whether the firmware API in this tree provides a put. */
static int vchiq_remove(struct platform_device *pdev)
{
	vchiq_debugfs_deinit();
	device_destroy(vchiq_class, vchiq_devid);
	class_destroy(vchiq_class);
	cdev_del(&vchiq_cdev);
	unregister_chrdev_region(vchiq_devid, 1);

	return 0;
}

static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);

static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
module_platform_driver(vchiq_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");