1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */ 3 4 #include <linux/kref.h> 5 #include <linux/rcupdate.h> 6 7 #include "vchiq_core.h" 8 9 #define VCHIQ_SLOT_HANDLER_STACK 8192 10 11 #define HANDLE_STATE_SHIFT 12 12 13 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index)) 14 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index)) 15 #define SLOT_INDEX_FROM_DATA(state, data) \ 16 (((unsigned int)((char *)data - (char *)state->slot_data)) / \ 17 VCHIQ_SLOT_SIZE) 18 #define SLOT_INDEX_FROM_INFO(state, info) \ 19 ((unsigned int)(info - state->slot_info)) 20 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \ 21 ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE)) 22 23 #define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1)) 24 25 #define SRVTRACE_LEVEL(srv) \ 26 (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level) 27 #define SRVTRACE_ENABLED(srv, lev) \ 28 (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev))) 29 30 struct vchiq_open_payload { 31 int fourcc; 32 int client_id; 33 short version; 34 short version_min; 35 }; 36 37 struct vchiq_openack_payload { 38 short version; 39 }; 40 41 enum { 42 QMFLAGS_IS_BLOCKING = (1 << 0), 43 QMFLAGS_NO_MUTEX_LOCK = (1 << 1), 44 QMFLAGS_NO_MUTEX_UNLOCK = (1 << 2) 45 }; 46 47 /* we require this for consistency between endpoints */ 48 vchiq_static_assert(sizeof(struct vchiq_header) == 8); 49 vchiq_static_assert(IS_POW2(sizeof(struct vchiq_header))); 50 vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS)); 51 vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS)); 52 vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES)); 53 vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN); 54 55 /* Run time control of log level, based on KERN_XXX level. 
*/ 56 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT; 57 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT; 58 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT; 59 60 DEFINE_SPINLOCK(bulk_waiter_spinlock); 61 static DEFINE_SPINLOCK(quota_spinlock); 62 63 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES]; 64 static unsigned int handle_seq; 65 66 static const char *const srvstate_names[] = { 67 "FREE", 68 "HIDDEN", 69 "LISTENING", 70 "OPENING", 71 "OPEN", 72 "OPENSYNC", 73 "CLOSESENT", 74 "CLOSERECVD", 75 "CLOSEWAIT", 76 "CLOSED" 77 }; 78 79 static const char *const reason_names[] = { 80 "SERVICE_OPENED", 81 "SERVICE_CLOSED", 82 "MESSAGE_AVAILABLE", 83 "BULK_TRANSMIT_DONE", 84 "BULK_RECEIVE_DONE", 85 "BULK_TRANSMIT_ABORTED", 86 "BULK_RECEIVE_ABORTED" 87 }; 88 89 static const char *const conn_state_names[] = { 90 "DISCONNECTED", 91 "CONNECTING", 92 "CONNECTED", 93 "PAUSING", 94 "PAUSE_SENT", 95 "PAUSED", 96 "RESUMING", 97 "PAUSE_TIMEOUT", 98 "RESUME_TIMEOUT" 99 }; 100 101 static void 102 release_message_sync(struct vchiq_state *state, struct vchiq_header *header); 103 104 static const char *msg_type_str(unsigned int msg_type) 105 { 106 switch (msg_type) { 107 case VCHIQ_MSG_PADDING: return "PADDING"; 108 case VCHIQ_MSG_CONNECT: return "CONNECT"; 109 case VCHIQ_MSG_OPEN: return "OPEN"; 110 case VCHIQ_MSG_OPENACK: return "OPENACK"; 111 case VCHIQ_MSG_CLOSE: return "CLOSE"; 112 case VCHIQ_MSG_DATA: return "DATA"; 113 case VCHIQ_MSG_BULK_RX: return "BULK_RX"; 114 case VCHIQ_MSG_BULK_TX: return "BULK_TX"; 115 case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE"; 116 case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE"; 117 case VCHIQ_MSG_PAUSE: return "PAUSE"; 118 case VCHIQ_MSG_RESUME: return "RESUME"; 119 case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE"; 120 case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE"; 121 case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE"; 122 } 123 return "???"; 124 } 125 126 static inline void 127 vchiq_set_service_state(struct vchiq_service 
*service, int newstate) 128 { 129 vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s", 130 service->state->id, service->localport, 131 srvstate_names[service->srvstate], 132 srvstate_names[newstate]); 133 service->srvstate = newstate; 134 } 135 136 struct vchiq_service * 137 find_service_by_handle(unsigned int handle) 138 { 139 struct vchiq_service *service; 140 141 rcu_read_lock(); 142 service = handle_to_service(handle); 143 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && 144 service->handle == handle && 145 kref_get_unless_zero(&service->ref_count)) { 146 service = rcu_pointer_handoff(service); 147 rcu_read_unlock(); 148 return service; 149 } 150 rcu_read_unlock(); 151 vchiq_log_info(vchiq_core_log_level, 152 "Invalid service handle 0x%x", handle); 153 return NULL; 154 } 155 156 struct vchiq_service * 157 find_service_by_port(struct vchiq_state *state, int localport) 158 { 159 160 if ((unsigned int)localport <= VCHIQ_PORT_MAX) { 161 struct vchiq_service *service; 162 163 rcu_read_lock(); 164 service = rcu_dereference(state->services[localport]); 165 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && 166 kref_get_unless_zero(&service->ref_count)) { 167 service = rcu_pointer_handoff(service); 168 rcu_read_unlock(); 169 return service; 170 } 171 rcu_read_unlock(); 172 } 173 vchiq_log_info(vchiq_core_log_level, 174 "Invalid port %d", localport); 175 return NULL; 176 } 177 178 struct vchiq_service * 179 find_service_for_instance(struct vchiq_instance *instance, 180 unsigned int handle) 181 { 182 struct vchiq_service *service; 183 184 rcu_read_lock(); 185 service = handle_to_service(handle); 186 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && 187 service->handle == handle && 188 service->instance == instance && 189 kref_get_unless_zero(&service->ref_count)) { 190 service = rcu_pointer_handoff(service); 191 rcu_read_unlock(); 192 return service; 193 } 194 rcu_read_unlock(); 195 vchiq_log_info(vchiq_core_log_level, 196 "Invalid service 
handle 0x%x", handle); 197 return NULL; 198 } 199 200 struct vchiq_service * 201 find_closed_service_for_instance(struct vchiq_instance *instance, 202 unsigned int handle) 203 { 204 struct vchiq_service *service; 205 206 rcu_read_lock(); 207 service = handle_to_service(handle); 208 if (service && 209 (service->srvstate == VCHIQ_SRVSTATE_FREE || 210 service->srvstate == VCHIQ_SRVSTATE_CLOSED) && 211 service->handle == handle && 212 service->instance == instance && 213 kref_get_unless_zero(&service->ref_count)) { 214 service = rcu_pointer_handoff(service); 215 rcu_read_unlock(); 216 return service; 217 } 218 rcu_read_unlock(); 219 vchiq_log_info(vchiq_core_log_level, 220 "Invalid service handle 0x%x", handle); 221 return service; 222 } 223 224 struct vchiq_service * 225 __next_service_by_instance(struct vchiq_state *state, 226 struct vchiq_instance *instance, 227 int *pidx) 228 { 229 struct vchiq_service *service = NULL; 230 int idx = *pidx; 231 232 while (idx < state->unused_service) { 233 struct vchiq_service *srv; 234 235 srv = rcu_dereference(state->services[idx++]); 236 if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE && 237 srv->instance == instance) { 238 service = srv; 239 break; 240 } 241 } 242 243 *pidx = idx; 244 return service; 245 } 246 247 struct vchiq_service * 248 next_service_by_instance(struct vchiq_state *state, 249 struct vchiq_instance *instance, 250 int *pidx) 251 { 252 struct vchiq_service *service; 253 254 rcu_read_lock(); 255 while (1) { 256 service = __next_service_by_instance(state, instance, pidx); 257 if (!service) 258 break; 259 if (kref_get_unless_zero(&service->ref_count)) { 260 service = rcu_pointer_handoff(service); 261 break; 262 } 263 } 264 rcu_read_unlock(); 265 return service; 266 } 267 268 void 269 lock_service(struct vchiq_service *service) 270 { 271 if (!service) { 272 WARN(1, "%s service is NULL\n", __func__); 273 return; 274 } 275 kref_get(&service->ref_count); 276 } 277 278 static void service_release(struct kref *kref) 279 
{ 280 struct vchiq_service *service = 281 container_of(kref, struct vchiq_service, ref_count); 282 struct vchiq_state *state = service->state; 283 284 WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE); 285 rcu_assign_pointer(state->services[service->localport], NULL); 286 if (service->userdata_term) 287 service->userdata_term(service->base.userdata); 288 kfree_rcu(service, rcu); 289 } 290 291 void 292 unlock_service(struct vchiq_service *service) 293 { 294 if (!service) { 295 WARN(1, "%s: service is NULL\n", __func__); 296 return; 297 } 298 kref_put(&service->ref_count, service_release); 299 } 300 301 int 302 vchiq_get_client_id(unsigned int handle) 303 { 304 struct vchiq_service *service; 305 int id; 306 307 rcu_read_lock(); 308 service = handle_to_service(handle); 309 id = service ? service->client_id : 0; 310 rcu_read_unlock(); 311 return id; 312 } 313 314 void * 315 vchiq_get_service_userdata(unsigned int handle) 316 { 317 void *userdata; 318 struct vchiq_service *service; 319 320 rcu_read_lock(); 321 service = handle_to_service(handle); 322 userdata = service ? service->base.userdata : NULL; 323 rcu_read_unlock(); 324 return userdata; 325 } 326 327 static void 328 mark_service_closing_internal(struct vchiq_service *service, int sh_thread) 329 { 330 struct vchiq_state *state = service->state; 331 struct vchiq_service_quota *service_quota; 332 333 service->closing = 1; 334 335 /* Synchronise with other threads. */ 336 mutex_lock(&state->recycle_mutex); 337 mutex_unlock(&state->recycle_mutex); 338 if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) { 339 /* If we're pausing then the slot_mutex is held until resume 340 * by the slot handler. Therefore don't try to acquire this 341 * mutex if we're the slot handler and in the pause sent state. 342 * We don't need to in this case anyway. */ 343 mutex_lock(&state->slot_mutex); 344 mutex_unlock(&state->slot_mutex); 345 } 346 347 /* Unblock any sending thread. 
*/ 348 service_quota = &state->service_quotas[service->localport]; 349 complete(&service_quota->quota_event); 350 } 351 352 static void 353 mark_service_closing(struct vchiq_service *service) 354 { 355 mark_service_closing_internal(service, 0); 356 } 357 358 static inline enum vchiq_status 359 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason, 360 struct vchiq_header *header, void *bulk_userdata) 361 { 362 enum vchiq_status status; 363 364 vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)", 365 service->state->id, service->localport, reason_names[reason], 366 header, bulk_userdata); 367 status = service->base.callback(reason, header, service->handle, 368 bulk_userdata); 369 if (status == VCHIQ_ERROR) { 370 vchiq_log_warning(vchiq_core_log_level, 371 "%d: ignoring ERROR from callback to service %x", 372 service->state->id, service->handle); 373 status = VCHIQ_SUCCESS; 374 } 375 376 if (reason != VCHIQ_MESSAGE_AVAILABLE) 377 vchiq_release_message(service->handle, header); 378 379 return status; 380 } 381 382 inline void 383 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate) 384 { 385 enum vchiq_connstate oldstate = state->conn_state; 386 387 vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id, 388 conn_state_names[oldstate], 389 conn_state_names[newstate]); 390 state->conn_state = newstate; 391 vchiq_platform_conn_state_changed(state, oldstate, newstate); 392 } 393 394 static inline void 395 remote_event_create(wait_queue_head_t *wq, struct remote_event *event) 396 { 397 event->armed = 0; 398 /* Don't clear the 'fired' flag because it may already have been set 399 ** by the other side. */ 400 init_waitqueue_head(wq); 401 } 402 403 /* 404 * All the event waiting routines in VCHIQ used a custom semaphore 405 * implementation that filtered most signals. This achieved a behaviour similar 406 * to the "killable" family of functions. 
While cleaning up this code all the 407 * routines where switched to the "interruptible" family of functions, as the 408 * former was deemed unjustified and the use "killable" set all VCHIQ's 409 * threads in D state. 410 */ 411 static inline int 412 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event) 413 { 414 if (!event->fired) { 415 event->armed = 1; 416 dsb(sy); 417 if (wait_event_interruptible(*wq, event->fired)) { 418 event->armed = 0; 419 return 0; 420 } 421 event->armed = 0; 422 wmb(); 423 } 424 425 event->fired = 0; 426 return 1; 427 } 428 429 static inline void 430 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event) 431 { 432 event->fired = 1; 433 event->armed = 0; 434 wake_up_all(wq); 435 } 436 437 static inline void 438 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event) 439 { 440 if (event->fired && event->armed) 441 remote_event_signal_local(wq, event); 442 } 443 444 void 445 remote_event_pollall(struct vchiq_state *state) 446 { 447 remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger); 448 remote_event_poll(&state->sync_release_event, &state->local->sync_release); 449 remote_event_poll(&state->trigger_event, &state->local->trigger); 450 remote_event_poll(&state->recycle_event, &state->local->recycle); 451 } 452 453 /* Round up message sizes so that any space at the end of a slot is always big 454 ** enough for a header. This relies on header size being a power of two, which 455 ** has been verified earlier by a static assertion. 
*/ 456 457 static inline size_t 458 calc_stride(size_t size) 459 { 460 /* Allow room for the header */ 461 size += sizeof(struct vchiq_header); 462 463 /* Round up */ 464 return (size + sizeof(struct vchiq_header) - 1) & 465 ~(sizeof(struct vchiq_header) - 1); 466 } 467 468 /* Called by the slot handler thread */ 469 static struct vchiq_service * 470 get_listening_service(struct vchiq_state *state, int fourcc) 471 { 472 int i; 473 474 WARN_ON(fourcc == VCHIQ_FOURCC_INVALID); 475 476 rcu_read_lock(); 477 for (i = 0; i < state->unused_service; i++) { 478 struct vchiq_service *service; 479 480 service = rcu_dereference(state->services[i]); 481 if (service && 482 service->public_fourcc == fourcc && 483 (service->srvstate == VCHIQ_SRVSTATE_LISTENING || 484 (service->srvstate == VCHIQ_SRVSTATE_OPEN && 485 service->remoteport == VCHIQ_PORT_FREE)) && 486 kref_get_unless_zero(&service->ref_count)) { 487 service = rcu_pointer_handoff(service); 488 rcu_read_unlock(); 489 return service; 490 } 491 } 492 rcu_read_unlock(); 493 return NULL; 494 } 495 496 /* Called by the slot handler thread */ 497 static struct vchiq_service * 498 get_connected_service(struct vchiq_state *state, unsigned int port) 499 { 500 int i; 501 502 rcu_read_lock(); 503 for (i = 0; i < state->unused_service; i++) { 504 struct vchiq_service *service = 505 rcu_dereference(state->services[i]); 506 507 if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN && 508 service->remoteport == port && 509 kref_get_unless_zero(&service->ref_count)) { 510 service = rcu_pointer_handoff(service); 511 rcu_read_unlock(); 512 return service; 513 } 514 } 515 rcu_read_unlock(); 516 return NULL; 517 } 518 519 inline void 520 request_poll(struct vchiq_state *state, struct vchiq_service *service, 521 int poll_type) 522 { 523 u32 value; 524 525 if (service) { 526 do { 527 value = atomic_read(&service->poll_flags); 528 } while (atomic_cmpxchg(&service->poll_flags, value, 529 value | (1 << poll_type)) != value); 530 531 do { 532 
value = atomic_read(&state->poll_services[ 533 service->localport>>5]); 534 } while (atomic_cmpxchg( 535 &state->poll_services[service->localport>>5], 536 value, value | (1 << (service->localport & 0x1f))) 537 != value); 538 } 539 540 state->poll_needed = 1; 541 wmb(); 542 543 /* ... and ensure the slot handler runs. */ 544 remote_event_signal_local(&state->trigger_event, &state->local->trigger); 545 } 546 547 /* Called from queue_message, by the slot handler and application threads, 548 ** with slot_mutex held */ 549 static struct vchiq_header * 550 reserve_space(struct vchiq_state *state, size_t space, int is_blocking) 551 { 552 struct vchiq_shared_state *local = state->local; 553 int tx_pos = state->local_tx_pos; 554 int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK); 555 556 if (space > slot_space) { 557 struct vchiq_header *header; 558 /* Fill the remaining space with padding */ 559 WARN_ON(!state->tx_data); 560 header = (struct vchiq_header *) 561 (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK)); 562 header->msgid = VCHIQ_MSGID_PADDING; 563 header->size = slot_space - sizeof(struct vchiq_header); 564 565 tx_pos += slot_space; 566 } 567 568 /* If necessary, get the next slot. */ 569 if ((tx_pos & VCHIQ_SLOT_MASK) == 0) { 570 int slot_index; 571 572 /* If there is no free slot... */ 573 574 if (!try_wait_for_completion(&state->slot_available_event)) { 575 /* ...wait for one. */ 576 577 VCHIQ_STATS_INC(state, slot_stalls); 578 579 /* But first, flush through the last slot. 
*/ 580 state->local_tx_pos = tx_pos; 581 local->tx_pos = tx_pos; 582 remote_event_signal(&state->remote->trigger); 583 584 if (!is_blocking || 585 (wait_for_completion_interruptible( 586 &state->slot_available_event))) 587 return NULL; /* No space available */ 588 } 589 590 if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) { 591 complete(&state->slot_available_event); 592 pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos); 593 return NULL; 594 } 595 596 slot_index = local->slot_queue[ 597 SLOT_QUEUE_INDEX_FROM_POS(tx_pos) & 598 VCHIQ_SLOT_QUEUE_MASK]; 599 state->tx_data = 600 (char *)SLOT_DATA_FROM_INDEX(state, slot_index); 601 } 602 603 state->local_tx_pos = tx_pos + space; 604 605 return (struct vchiq_header *)(state->tx_data + 606 (tx_pos & VCHIQ_SLOT_MASK)); 607 } 608 609 /* Called by the recycle thread. */ 610 static void 611 process_free_queue(struct vchiq_state *state, BITSET_T *service_found, 612 size_t length) 613 { 614 struct vchiq_shared_state *local = state->local; 615 int slot_queue_available; 616 617 /* Find slots which have been freed by the other side, and return them 618 ** to the available queue. */ 619 slot_queue_available = state->slot_queue_available; 620 621 /* 622 * Use a memory barrier to ensure that any state that may have been 623 * modified by another thread is not masked by stale prefetched 624 * values. 625 */ 626 mb(); 627 628 while (slot_queue_available != local->slot_queue_recycle) { 629 unsigned int pos; 630 int slot_index = local->slot_queue[slot_queue_available++ & 631 VCHIQ_SLOT_QUEUE_MASK]; 632 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index); 633 int data_found = 0; 634 635 /* 636 * Beware of the address dependency - data is calculated 637 * using an index written by the other side. 
638 */ 639 rmb(); 640 641 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x", 642 state->id, slot_index, data, 643 local->slot_queue_recycle, slot_queue_available); 644 645 /* Initialise the bitmask for services which have used this 646 ** slot */ 647 memset(service_found, 0, length); 648 649 pos = 0; 650 651 while (pos < VCHIQ_SLOT_SIZE) { 652 struct vchiq_header *header = 653 (struct vchiq_header *)(data + pos); 654 int msgid = header->msgid; 655 656 if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) { 657 int port = VCHIQ_MSG_SRCPORT(msgid); 658 struct vchiq_service_quota *service_quota = 659 &state->service_quotas[port]; 660 int count; 661 662 spin_lock("a_spinlock); 663 count = service_quota->message_use_count; 664 if (count > 0) 665 service_quota->message_use_count = 666 count - 1; 667 spin_unlock("a_spinlock); 668 669 if (count == service_quota->message_quota) 670 /* Signal the service that it 671 ** has dropped below its quota 672 */ 673 complete(&service_quota->quota_event); 674 else if (count == 0) { 675 vchiq_log_error(vchiq_core_log_level, 676 "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)", 677 port, 678 service_quota->message_use_count, 679 header, msgid, header->msgid, 680 header->size); 681 WARN(1, "invalid message use count\n"); 682 } 683 if (!BITSET_IS_SET(service_found, port)) { 684 /* Set the found bit for this service */ 685 BITSET_SET(service_found, port); 686 687 spin_lock("a_spinlock); 688 count = service_quota->slot_use_count; 689 if (count > 0) 690 service_quota->slot_use_count = 691 count - 1; 692 spin_unlock("a_spinlock); 693 694 if (count > 0) { 695 /* Signal the service in case 696 ** it has dropped below its 697 ** quota */ 698 complete(&service_quota->quota_event); 699 vchiq_log_trace( 700 vchiq_core_log_level, 701 "%d: pfq:%d %x@%pK - slot_use->%d", 702 state->id, port, 703 header->size, header, 704 count - 1); 705 } else { 706 vchiq_log_error( 707 vchiq_core_log_level, 708 "service %d 
slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)", 709 port, count, header, 710 msgid, header->msgid, 711 header->size); 712 WARN(1, "bad slot use count\n"); 713 } 714 } 715 716 data_found = 1; 717 } 718 719 pos += calc_stride(header->size); 720 if (pos > VCHIQ_SLOT_SIZE) { 721 vchiq_log_error(vchiq_core_log_level, 722 "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x", 723 pos, header, msgid, header->msgid, 724 header->size); 725 WARN(1, "invalid slot position\n"); 726 } 727 } 728 729 if (data_found) { 730 int count; 731 732 spin_lock("a_spinlock); 733 count = state->data_use_count; 734 if (count > 0) 735 state->data_use_count = 736 count - 1; 737 spin_unlock("a_spinlock); 738 if (count == state->data_quota) 739 complete(&state->data_quota_event); 740 } 741 742 /* 743 * Don't allow the slot to be reused until we are no 744 * longer interested in it. 745 */ 746 mb(); 747 748 state->slot_queue_available = slot_queue_available; 749 complete(&state->slot_available_event); 750 } 751 } 752 753 static ssize_t 754 memcpy_copy_callback( 755 void *context, void *dest, 756 size_t offset, size_t maxsize) 757 { 758 memcpy(dest + offset, context + offset, maxsize); 759 return maxsize; 760 } 761 762 static ssize_t 763 copy_message_data( 764 ssize_t (*copy_callback)(void *context, void *dest, 765 size_t offset, size_t maxsize), 766 void *context, 767 void *dest, 768 size_t size) 769 { 770 size_t pos = 0; 771 772 while (pos < size) { 773 ssize_t callback_result; 774 size_t max_bytes = size - pos; 775 776 callback_result = 777 copy_callback(context, dest + pos, 778 pos, max_bytes); 779 780 if (callback_result < 0) 781 return callback_result; 782 783 if (!callback_result) 784 return -EIO; 785 786 if (callback_result > max_bytes) 787 return -EIO; 788 789 pos += callback_result; 790 } 791 792 return size; 793 } 794 795 /* Called by the slot handler and application threads */ 796 static enum vchiq_status 797 queue_message(struct 
vchiq_state *state, struct vchiq_service *service, 798 int msgid, 799 ssize_t (*copy_callback)(void *context, void *dest, 800 size_t offset, size_t maxsize), 801 void *context, size_t size, int flags) 802 { 803 struct vchiq_shared_state *local; 804 struct vchiq_service_quota *service_quota = NULL; 805 struct vchiq_header *header; 806 int type = VCHIQ_MSG_TYPE(msgid); 807 808 size_t stride; 809 810 local = state->local; 811 812 stride = calc_stride(size); 813 814 WARN_ON(!(stride <= VCHIQ_SLOT_SIZE)); 815 816 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) && 817 mutex_lock_killable(&state->slot_mutex)) 818 return VCHIQ_RETRY; 819 820 if (type == VCHIQ_MSG_DATA) { 821 int tx_end_index; 822 823 if (!service) { 824 WARN(1, "%s: service is NULL\n", __func__); 825 mutex_unlock(&state->slot_mutex); 826 return VCHIQ_ERROR; 827 } 828 829 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK | 830 QMFLAGS_NO_MUTEX_UNLOCK)); 831 832 if (service->closing) { 833 /* The service has been closed */ 834 mutex_unlock(&state->slot_mutex); 835 return VCHIQ_ERROR; 836 } 837 838 service_quota = &state->service_quotas[service->localport]; 839 840 spin_lock("a_spinlock); 841 842 /* Ensure this service doesn't use more than its quota of 843 ** messages or slots */ 844 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS( 845 state->local_tx_pos + stride - 1); 846 847 /* Ensure data messages don't use more than their quota of 848 ** slots */ 849 while ((tx_end_index != state->previous_data_index) && 850 (state->data_use_count == state->data_quota)) { 851 VCHIQ_STATS_INC(state, data_stalls); 852 spin_unlock("a_spinlock); 853 mutex_unlock(&state->slot_mutex); 854 855 if (wait_for_completion_interruptible( 856 &state->data_quota_event)) 857 return VCHIQ_RETRY; 858 859 mutex_lock(&state->slot_mutex); 860 spin_lock("a_spinlock); 861 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS( 862 state->local_tx_pos + stride - 1); 863 if ((tx_end_index == state->previous_data_index) || 864 (state->data_use_count < state->data_quota)) { 865 /* Pass 
the signal on to other waiters */ 866 complete(&state->data_quota_event); 867 break; 868 } 869 } 870 871 while ((service_quota->message_use_count == 872 service_quota->message_quota) || 873 ((tx_end_index != service_quota->previous_tx_index) && 874 (service_quota->slot_use_count == 875 service_quota->slot_quota))) { 876 spin_unlock("a_spinlock); 877 vchiq_log_trace(vchiq_core_log_level, 878 "%d: qm:%d %s,%zx - quota stall " 879 "(msg %d, slot %d)", 880 state->id, service->localport, 881 msg_type_str(type), size, 882 service_quota->message_use_count, 883 service_quota->slot_use_count); 884 VCHIQ_SERVICE_STATS_INC(service, quota_stalls); 885 mutex_unlock(&state->slot_mutex); 886 if (wait_for_completion_interruptible( 887 &service_quota->quota_event)) 888 return VCHIQ_RETRY; 889 if (service->closing) 890 return VCHIQ_ERROR; 891 if (mutex_lock_killable(&state->slot_mutex)) 892 return VCHIQ_RETRY; 893 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) { 894 /* The service has been closed */ 895 mutex_unlock(&state->slot_mutex); 896 return VCHIQ_ERROR; 897 } 898 spin_lock("a_spinlock); 899 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS( 900 state->local_tx_pos + stride - 1); 901 } 902 903 spin_unlock("a_spinlock); 904 } 905 906 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING); 907 908 if (!header) { 909 if (service) 910 VCHIQ_SERVICE_STATS_INC(service, slot_stalls); 911 /* In the event of a failure, return the mutex to the 912 state it was in */ 913 if (!(flags & QMFLAGS_NO_MUTEX_LOCK)) 914 mutex_unlock(&state->slot_mutex); 915 return VCHIQ_RETRY; 916 } 917 918 if (type == VCHIQ_MSG_DATA) { 919 ssize_t callback_result; 920 int tx_end_index; 921 int slot_use_count; 922 923 vchiq_log_info(vchiq_core_log_level, 924 "%d: qm %s@%pK,%zx (%d->%d)", 925 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), 926 header, size, VCHIQ_MSG_SRCPORT(msgid), 927 VCHIQ_MSG_DSTPORT(msgid)); 928 929 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK | 930 QMFLAGS_NO_MUTEX_UNLOCK)); 931 932 
callback_result = 933 copy_message_data(copy_callback, context, 934 header->data, size); 935 936 if (callback_result < 0) { 937 mutex_unlock(&state->slot_mutex); 938 VCHIQ_SERVICE_STATS_INC(service, 939 error_count); 940 return VCHIQ_ERROR; 941 } 942 943 if (SRVTRACE_ENABLED(service, 944 VCHIQ_LOG_INFO)) 945 vchiq_log_dump_mem("Sent", 0, 946 header->data, 947 min((size_t)16, 948 (size_t)callback_result)); 949 950 spin_lock("a_spinlock); 951 service_quota->message_use_count++; 952 953 tx_end_index = 954 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1); 955 956 /* If this transmission can't fit in the last slot used by any 957 ** service, the data_use_count must be increased. */ 958 if (tx_end_index != state->previous_data_index) { 959 state->previous_data_index = tx_end_index; 960 state->data_use_count++; 961 } 962 963 /* If this isn't the same slot last used by this service, 964 ** the service's slot_use_count must be increased. */ 965 if (tx_end_index != service_quota->previous_tx_index) { 966 service_quota->previous_tx_index = tx_end_index; 967 slot_use_count = ++service_quota->slot_use_count; 968 } else { 969 slot_use_count = 0; 970 } 971 972 spin_unlock("a_spinlock); 973 974 if (slot_use_count) 975 vchiq_log_trace(vchiq_core_log_level, 976 "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)", 977 state->id, service->localport, 978 msg_type_str(VCHIQ_MSG_TYPE(msgid)), size, 979 slot_use_count, header); 980 981 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count); 982 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size); 983 } else { 984 vchiq_log_info(vchiq_core_log_level, 985 "%d: qm %s@%pK,%zx (%d->%d)", state->id, 986 msg_type_str(VCHIQ_MSG_TYPE(msgid)), 987 header, size, VCHIQ_MSG_SRCPORT(msgid), 988 VCHIQ_MSG_DSTPORT(msgid)); 989 if (size != 0) { 990 /* It is assumed for now that this code path 991 * only happens from calls inside this file. 
992 * 993 * External callers are through the vchiq_queue_message 994 * path which always sets the type to be VCHIQ_MSG_DATA 995 * 996 * At first glance this appears to be correct but 997 * more review is needed. 998 */ 999 copy_message_data(copy_callback, context, 1000 header->data, size); 1001 } 1002 VCHIQ_STATS_INC(state, ctrl_tx_count); 1003 } 1004 1005 header->msgid = msgid; 1006 header->size = size; 1007 1008 { 1009 int svc_fourcc; 1010 1011 svc_fourcc = service 1012 ? service->base.fourcc 1013 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?'); 1014 1015 vchiq_log_info(SRVTRACE_LEVEL(service), 1016 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu", 1017 msg_type_str(VCHIQ_MSG_TYPE(msgid)), 1018 VCHIQ_MSG_TYPE(msgid), 1019 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), 1020 VCHIQ_MSG_SRCPORT(msgid), 1021 VCHIQ_MSG_DSTPORT(msgid), 1022 size); 1023 } 1024 1025 /* Make sure the new header is visible to the peer. */ 1026 wmb(); 1027 1028 /* Make the new tx_pos visible to the peer. */ 1029 local->tx_pos = state->local_tx_pos; 1030 wmb(); 1031 1032 if (service && (type == VCHIQ_MSG_CLOSE)) 1033 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT); 1034 1035 if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK)) 1036 mutex_unlock(&state->slot_mutex); 1037 1038 remote_event_signal(&state->remote->trigger); 1039 1040 return VCHIQ_SUCCESS; 1041 } 1042 1043 /* Called by the slot handler and application threads */ 1044 static enum vchiq_status 1045 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service, 1046 int msgid, 1047 ssize_t (*copy_callback)(void *context, void *dest, 1048 size_t offset, size_t maxsize), 1049 void *context, int size, int is_blocking) 1050 { 1051 struct vchiq_shared_state *local; 1052 struct vchiq_header *header; 1053 ssize_t callback_result; 1054 1055 local = state->local; 1056 1057 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME && 1058 mutex_lock_killable(&state->sync_mutex)) 1059 return VCHIQ_RETRY; 1060 1061 remote_event_wait(&state->sync_release_event, 
&local->sync_release); 1062 1063 rmb(); 1064 1065 header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state, 1066 local->slot_sync); 1067 1068 { 1069 int oldmsgid = header->msgid; 1070 1071 if (oldmsgid != VCHIQ_MSGID_PADDING) 1072 vchiq_log_error(vchiq_core_log_level, 1073 "%d: qms - msgid %x, not PADDING", 1074 state->id, oldmsgid); 1075 } 1076 1077 vchiq_log_info(vchiq_sync_log_level, 1078 "%d: qms %s@%pK,%x (%d->%d)", state->id, 1079 msg_type_str(VCHIQ_MSG_TYPE(msgid)), 1080 header, size, VCHIQ_MSG_SRCPORT(msgid), 1081 VCHIQ_MSG_DSTPORT(msgid)); 1082 1083 callback_result = 1084 copy_message_data(copy_callback, context, 1085 header->data, size); 1086 1087 if (callback_result < 0) { 1088 mutex_unlock(&state->slot_mutex); 1089 VCHIQ_SERVICE_STATS_INC(service, 1090 error_count); 1091 return VCHIQ_ERROR; 1092 } 1093 1094 if (service) { 1095 if (SRVTRACE_ENABLED(service, 1096 VCHIQ_LOG_INFO)) 1097 vchiq_log_dump_mem("Sent", 0, 1098 header->data, 1099 min((size_t)16, 1100 (size_t)callback_result)); 1101 1102 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count); 1103 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size); 1104 } else { 1105 VCHIQ_STATS_INC(state, ctrl_tx_count); 1106 } 1107 1108 header->size = size; 1109 header->msgid = msgid; 1110 1111 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) { 1112 int svc_fourcc; 1113 1114 svc_fourcc = service 1115 ? 
service->base.fourcc 1116 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?'); 1117 1118 vchiq_log_trace(vchiq_sync_log_level, 1119 "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d", 1120 msg_type_str(VCHIQ_MSG_TYPE(msgid)), 1121 VCHIQ_MSG_TYPE(msgid), 1122 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), 1123 VCHIQ_MSG_SRCPORT(msgid), 1124 VCHIQ_MSG_DSTPORT(msgid), 1125 size); 1126 } 1127 1128 remote_event_signal(&state->remote->sync_trigger); 1129 1130 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE) 1131 mutex_unlock(&state->sync_mutex); 1132 1133 return VCHIQ_SUCCESS; 1134 } 1135 1136 static inline void 1137 claim_slot(struct vchiq_slot_info *slot) 1138 { 1139 slot->use_count++; 1140 } 1141 1142 static void 1143 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info, 1144 struct vchiq_header *header, struct vchiq_service *service) 1145 { 1146 int release_count; 1147 1148 mutex_lock(&state->recycle_mutex); 1149 1150 if (header) { 1151 int msgid = header->msgid; 1152 1153 if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || 1154 (service && service->closing)) { 1155 mutex_unlock(&state->recycle_mutex); 1156 return; 1157 } 1158 1159 /* Rewrite the message header to prevent a double 1160 ** release */ 1161 header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED; 1162 } 1163 1164 release_count = slot_info->release_count; 1165 slot_info->release_count = ++release_count; 1166 1167 if (release_count == slot_info->use_count) { 1168 int slot_queue_recycle; 1169 /* Add to the freed queue */ 1170 1171 /* A read barrier is necessary here to prevent speculative 1172 ** fetches of remote->slot_queue_recycle from overtaking the 1173 ** mutex. 
*/ 1174 rmb(); 1175 1176 slot_queue_recycle = state->remote->slot_queue_recycle; 1177 state->remote->slot_queue[slot_queue_recycle & 1178 VCHIQ_SLOT_QUEUE_MASK] = 1179 SLOT_INDEX_FROM_INFO(state, slot_info); 1180 state->remote->slot_queue_recycle = slot_queue_recycle + 1; 1181 vchiq_log_info(vchiq_core_log_level, 1182 "%d: %s %d - recycle->%x", state->id, __func__, 1183 SLOT_INDEX_FROM_INFO(state, slot_info), 1184 state->remote->slot_queue_recycle); 1185 1186 /* A write barrier is necessary, but remote_event_signal 1187 ** contains one. */ 1188 remote_event_signal(&state->remote->recycle); 1189 } 1190 1191 mutex_unlock(&state->recycle_mutex); 1192 } 1193 1194 /* Called by the slot handler - don't hold the bulk mutex */ 1195 static enum vchiq_status 1196 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue, 1197 int retry_poll) 1198 { 1199 enum vchiq_status status = VCHIQ_SUCCESS; 1200 1201 vchiq_log_trace(vchiq_core_log_level, 1202 "%d: nb:%d %cx - p=%x rn=%x r=%x", 1203 service->state->id, service->localport, 1204 (queue == &service->bulk_tx) ? 
't' : 'r', 1205 queue->process, queue->remote_notify, queue->remove); 1206 1207 queue->remote_notify = queue->process; 1208 1209 if (status == VCHIQ_SUCCESS) { 1210 while (queue->remove != queue->remote_notify) { 1211 struct vchiq_bulk *bulk = 1212 &queue->bulks[BULK_INDEX(queue->remove)]; 1213 1214 /* Only generate callbacks for non-dummy bulk 1215 ** requests, and non-terminated services */ 1216 if (bulk->data && service->instance) { 1217 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) { 1218 if (bulk->dir == VCHIQ_BULK_TRANSMIT) { 1219 VCHIQ_SERVICE_STATS_INC(service, 1220 bulk_tx_count); 1221 VCHIQ_SERVICE_STATS_ADD(service, 1222 bulk_tx_bytes, 1223 bulk->actual); 1224 } else { 1225 VCHIQ_SERVICE_STATS_INC(service, 1226 bulk_rx_count); 1227 VCHIQ_SERVICE_STATS_ADD(service, 1228 bulk_rx_bytes, 1229 bulk->actual); 1230 } 1231 } else { 1232 VCHIQ_SERVICE_STATS_INC(service, 1233 bulk_aborted_count); 1234 } 1235 if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) { 1236 struct bulk_waiter *waiter; 1237 1238 spin_lock(&bulk_waiter_spinlock); 1239 waiter = bulk->userdata; 1240 if (waiter) { 1241 waiter->actual = bulk->actual; 1242 complete(&waiter->event); 1243 } 1244 spin_unlock(&bulk_waiter_spinlock); 1245 } else if (bulk->mode == 1246 VCHIQ_BULK_MODE_CALLBACK) { 1247 enum vchiq_reason reason = (bulk->dir == 1248 VCHIQ_BULK_TRANSMIT) ? 1249 ((bulk->actual == 1250 VCHIQ_BULK_ACTUAL_ABORTED) ? 1251 VCHIQ_BULK_TRANSMIT_ABORTED : 1252 VCHIQ_BULK_TRANSMIT_DONE) : 1253 ((bulk->actual == 1254 VCHIQ_BULK_ACTUAL_ABORTED) ? 1255 VCHIQ_BULK_RECEIVE_ABORTED : 1256 VCHIQ_BULK_RECEIVE_DONE); 1257 status = make_service_callback(service, 1258 reason, NULL, bulk->userdata); 1259 if (status == VCHIQ_RETRY) 1260 break; 1261 } 1262 } 1263 1264 queue->remove++; 1265 complete(&service->bulk_remove_event); 1266 } 1267 if (!retry_poll) 1268 status = VCHIQ_SUCCESS; 1269 } 1270 1271 if (status == VCHIQ_RETRY) 1272 request_poll(service->state, service, 1273 (queue == &service->bulk_tx) ? 
1274 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY); 1275 1276 return status; 1277 } 1278 1279 /* Called by the slot handler thread */ 1280 static void 1281 poll_services(struct vchiq_state *state) 1282 { 1283 int group, i; 1284 1285 for (group = 0; group < BITSET_SIZE(state->unused_service); group++) { 1286 u32 flags; 1287 1288 flags = atomic_xchg(&state->poll_services[group], 0); 1289 for (i = 0; flags; i++) { 1290 if (flags & (1 << i)) { 1291 struct vchiq_service *service = 1292 find_service_by_port(state, 1293 (group<<5) + i); 1294 u32 service_flags; 1295 1296 flags &= ~(1 << i); 1297 if (!service) 1298 continue; 1299 service_flags = 1300 atomic_xchg(&service->poll_flags, 0); 1301 if (service_flags & 1302 (1 << VCHIQ_POLL_REMOVE)) { 1303 vchiq_log_info(vchiq_core_log_level, 1304 "%d: ps - remove %d<->%d", 1305 state->id, service->localport, 1306 service->remoteport); 1307 1308 /* Make it look like a client, because 1309 it must be removed and not left in 1310 the LISTENING state. */ 1311 service->public_fourcc = 1312 VCHIQ_FOURCC_INVALID; 1313 1314 if (vchiq_close_service_internal( 1315 service, 0/*!close_recvd*/) != 1316 VCHIQ_SUCCESS) 1317 request_poll(state, service, 1318 VCHIQ_POLL_REMOVE); 1319 } else if (service_flags & 1320 (1 << VCHIQ_POLL_TERMINATE)) { 1321 vchiq_log_info(vchiq_core_log_level, 1322 "%d: ps - terminate %d<->%d", 1323 state->id, service->localport, 1324 service->remoteport); 1325 if (vchiq_close_service_internal( 1326 service, 0/*!close_recvd*/) != 1327 VCHIQ_SUCCESS) 1328 request_poll(state, service, 1329 VCHIQ_POLL_TERMINATE); 1330 } 1331 if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY)) 1332 notify_bulks(service, 1333 &service->bulk_tx, 1334 1/*retry_poll*/); 1335 if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY)) 1336 notify_bulks(service, 1337 &service->bulk_rx, 1338 1/*retry_poll*/); 1339 unlock_service(service); 1340 } 1341 } 1342 } 1343 } 1344 1345 /* Called with the bulk_mutex held */ 1346 static void 1347 abort_outstanding_bulks(struct 
vchiq_service *service,
	struct vchiq_bulk_queue *queue)
{
	int is_tx = (queue == &service->bulk_tx);

	vchiq_log_trace(vchiq_core_log_level,
		"%d: aob:%d %cx - li=%x ri=%x p=%x",
		service->state->id, service->localport, is_tx ? 't' : 'r',
		queue->local_insert, queue->remote_insert, queue->process);

	/* Neither insertion point may lag behind the processing cursor. */
	WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
	WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));

	/* Walk every slot up to the furthest insertion point, fabricating
	** dummy local/remote halves where one side never caught up, so
	** each pending bulk can be completed as ABORTED. */
	while ((queue->process != queue->local_insert) ||
		(queue->process != queue->remote_insert)) {
		struct vchiq_bulk *bulk =
			&queue->bulks[BULK_INDEX(queue->process)];

		if (queue->process == queue->remote_insert) {
			/* fabricate a matching dummy bulk */
			bulk->remote_data = NULL;
			bulk->remote_size = 0;
			queue->remote_insert++;
		}

		if (queue->process != queue->local_insert) {
			vchiq_complete_bulk(bulk);

			vchiq_log_info(SRVTRACE_LEVEL(service),
				"%s %c%c%c%c d:%d ABORTED - tx len:%d, "
				"rx len:%d",
				is_tx ? "Send Bulk to" : "Recv Bulk from",
				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
				service->remoteport,
				bulk->size,
				bulk->remote_size);
		} else {
			/* fabricate a matching dummy bulk */
			bulk->data = NULL;
			bulk->size = 0;
			bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
			bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
				VCHIQ_BULK_RECEIVE;
			queue->local_insert++;
		}

		queue->process++;
	}
}

/*
 * Handle an incoming OPEN message: look up a listening service for the
 * requested fourcc, check version compatibility, send an OPENACK (sync
 * or async as appropriate) and move the service to the OPEN state.
 * Returns 1 when the message has been fully dealt with (including the
 * rejection path, which answers with a CLOSE), or 0 when the outgoing
 * queue is full and the caller must retry later.
 */
static int
parse_open(struct vchiq_state *state, struct vchiq_header *header)
{
	struct vchiq_service *service = NULL;
	int msgid, size;
	unsigned int localport, remoteport;

	msgid = header->msgid;
	size = header->size;
	localport = VCHIQ_MSG_DSTPORT(msgid);
	remoteport = VCHIQ_MSG_SRCPORT(msgid);
	/* NOTE(review): size is a signed int compared against a size_t,
	** so a negative size would convert and pass this check -
	** presumably header->size from the VPU is trusted; confirm. */
	if (size >= sizeof(struct vchiq_open_payload)) {
		const struct vchiq_open_payload *payload =
			(struct vchiq_open_payload *)header->data;
		unsigned int fourcc;

		fourcc = payload->fourcc;
		vchiq_log_info(vchiq_core_log_level,
			"%d: prs OPEN@%pK (%d->'%c%c%c%c')",
			state->id, header, localport,
			VCHIQ_FOURCC_AS_4CHARS(fourcc));

		service = get_listening_service(state, fourcc);

		if (service) {
			/* A matching service exists */
			short version = payload->version;
			short version_min = payload->version_min;

			/* Both sides must accept the other's version range */
			if ((service->version < version_min) ||
				(version < service->version_min)) {
				/* Version mismatch */
				vchiq_loud_error_header();
				vchiq_loud_error("%d: service %d (%c%c%c%c) "
					"version mismatch - local (%d, min %d)"
					" vs. remote (%d, min %d)",
					state->id, service->localport,
					VCHIQ_FOURCC_AS_4CHARS(fourcc),
					service->version, service->version_min,
					version, version_min);
				vchiq_loud_error_footer();
				unlock_service(service);
				service = NULL;
				goto fail_open;
			}
			service->peer_version = version;

			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
				struct vchiq_openack_payload ack_payload = {
					service->version
				};

				/* Peers predating synchronous mode always
				** get a plain (async) service. */
				if (state->version_common <
					VCHIQ_VERSION_SYNCHRONOUS_MODE)
					service->sync = 0;

				/* Acknowledge the OPEN */
				if (service->sync) {
					if (queue_message_sync(
						state,
						NULL,
						VCHIQ_MAKE_MSG(
							VCHIQ_MSG_OPENACK,
							service->localport,
							remoteport),
						memcpy_copy_callback,
						&ack_payload,
						sizeof(ack_payload),
						0) == VCHIQ_RETRY)
						goto bail_not_ready;
				} else {
					if (queue_message(state,
						NULL,
						VCHIQ_MAKE_MSG(
							VCHIQ_MSG_OPENACK,
							service->localport,
							remoteport),
						memcpy_copy_callback,
						&ack_payload,
						sizeof(ack_payload),
						0) == VCHIQ_RETRY)
						goto bail_not_ready;
				}

				/* The service is now open */
				vchiq_set_service_state(service,
					service->sync ? VCHIQ_SRVSTATE_OPENSYNC
					: VCHIQ_SRVSTATE_OPEN);
			}

			/* Success - the message has been dealt with */
			unlock_service(service);
			return 1;
		}
	}

fail_open:
	/* No available service, or an invalid request - send a CLOSE */
	if (queue_message(state, NULL,
		VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
		NULL, NULL, 0, 0) == VCHIQ_RETRY)
		goto bail_not_ready;

	return 1;

bail_not_ready:
	if (service)
		unlock_service(service);

	return 0;
}

/*
 * Drain the receive slots, dispatching each message to its handler.
 * Called by the slot handler thread.
 */
static void
parse_rx_slots(struct vchiq_state *state)
{
	struct vchiq_shared_state *remote = state->remote;
	struct vchiq_service *service = NULL;
	int tx_pos;

	DEBUG_INITIALISE(state->local)

	tx_pos = remote->tx_pos;

	while (state->rx_pos != tx_pos) {
		struct vchiq_header *header;
		int msgid, size;
		int type;
		unsigned int localport, remoteport;

		DEBUG_TRACE(PARSE_LINE);
		if (!state->rx_data) {
			/* Starting a fresh slot - look it up from the
			** remote's slot queue. */
			int rx_index;

			WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
			rx_index = remote->slot_queue[
				SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
				VCHIQ_SLOT_QUEUE_MASK];
			state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
				rx_index);
			state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);

			/* Initialise use_count to one, and increment
			** release_count at the end of the slot to avoid
			** releasing the slot prematurely.
*/
			state->rx_info->use_count = 1;
			state->rx_info->release_count = 0;
		}

		header = (struct vchiq_header *)(state->rx_data +
			(state->rx_pos & VCHIQ_SLOT_MASK));
		DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
		msgid = header->msgid;
		DEBUG_VALUE(PARSE_MSGID, msgid);
		size = header->size;
		type = VCHIQ_MSG_TYPE(msgid);
		localport = VCHIQ_MSG_DSTPORT(msgid);
		remoteport = VCHIQ_MSG_SRCPORT(msgid);

		if (type != VCHIQ_MSG_DATA)
			VCHIQ_STATS_INC(state, ctrl_rx_count);

		/* Service-addressed message types must resolve to a live
		** local service before any further processing. */
		switch (type) {
		case VCHIQ_MSG_OPENACK:
		case VCHIQ_MSG_CLOSE:
		case VCHIQ_MSG_DATA:
		case VCHIQ_MSG_BULK_RX:
		case VCHIQ_MSG_BULK_TX:
		case VCHIQ_MSG_BULK_RX_DONE:
		case VCHIQ_MSG_BULK_TX_DONE:
			service = find_service_by_port(state, localport);
			if ((!service ||
				((service->remoteport != remoteport) &&
				(service->remoteport != VCHIQ_PORT_FREE))) &&
				(localport == 0) &&
				(type == VCHIQ_MSG_CLOSE)) {
				/* This could be a CLOSE from a client which
				   hadn't yet received the OPENACK - look for
				   the connected service */
				if (service)
					unlock_service(service);
				service = get_connected_service(state,
					remoteport);
				if (service)
					vchiq_log_warning(vchiq_core_log_level,
						"%d: prs %s@%pK (%d->%d) - found connected service %d",
						state->id, msg_type_str(type),
						header, remoteport, localport,
						service->localport);
			}

			if (!service) {
				vchiq_log_error(vchiq_core_log_level,
					"%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
					state->id, msg_type_str(type),
					header, remoteport, localport,
					localport);
				goto skip_message;
			}
			break;
		default:
			break;
		}

		if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
			int svc_fourcc;

			svc_fourcc = service
				? service->base.fourcc
				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
			vchiq_log_info(SRVTRACE_LEVEL(service),
				"Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
				"len:%d",
				msg_type_str(type), type,
				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
				remoteport, localport, size);
			if (size > 0)
				vchiq_log_dump_mem("Rcvd", 0, header->data,
					min(16, size));
		}

		/* Sanity-check that the message fits within its slot. */
		if (((unsigned long)header & VCHIQ_SLOT_MASK) +
			calc_stride(size) > VCHIQ_SLOT_SIZE) {
			vchiq_log_error(vchiq_core_log_level,
				"header %pK (msgid %x) - size %x too big for slot",
				header, (unsigned int)msgid,
				(unsigned int)size);
			WARN(1, "oversized for slot\n");
		}

		switch (type) {
		case VCHIQ_MSG_OPEN:
			WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
			if (!parse_open(state, header))
				goto bail_not_ready;
			break;
		case VCHIQ_MSG_OPENACK:
			if (size >= sizeof(struct vchiq_openack_payload)) {
				const struct vchiq_openack_payload *payload =
					(struct vchiq_openack_payload *)
					header->data;
				service->peer_version = payload->version;
			}
			vchiq_log_info(vchiq_core_log_level,
				"%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
				state->id, header, size, remoteport, localport,
				service->peer_version);
			if (service->srvstate ==
				VCHIQ_SRVSTATE_OPENING) {
				service->remoteport = remoteport;
				vchiq_set_service_state(service,
					VCHIQ_SRVSTATE_OPEN);
				complete(&service->remove_event);
			} else
				vchiq_log_error(vchiq_core_log_level,
					"OPENACK received in state %s",
					srvstate_names[service->srvstate]);
			break;
		case VCHIQ_MSG_CLOSE:
			WARN_ON(size != 0); /* There should be no data */

			vchiq_log_info(vchiq_core_log_level,
				"%d: prs CLOSE@%pK (%d->%d)",
				state->id, header, remoteport, localport);

			mark_service_closing_internal(service, 1);

			if (vchiq_close_service_internal(service,
				1/*close_recvd*/) == VCHIQ_RETRY)
				goto bail_not_ready;

			vchiq_log_info(vchiq_core_log_level,
				"Close Service %c%c%c%c s:%u d:%d",
				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
				service->localport,
				service->remoteport);
			break;
		case VCHIQ_MSG_DATA:
			vchiq_log_info(vchiq_core_log_level,
				"%d: prs DATA@%pK,%x (%d->%d)",
				state->id, header, size, remoteport, localport);

			if ((service->remoteport == remoteport)
				&& (service->srvstate ==
				VCHIQ_SRVSTATE_OPEN)) {
				/* Claim the message (and its slot) until the
				** service releases it. */
				header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
				claim_slot(state->rx_info);
				DEBUG_TRACE(PARSE_LINE);
				if (make_service_callback(service,
					VCHIQ_MESSAGE_AVAILABLE, header,
					NULL) == VCHIQ_RETRY) {
					DEBUG_TRACE(PARSE_LINE);
					goto bail_not_ready;
				}
				VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
				VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
					size);
			} else {
				VCHIQ_STATS_INC(state, error_count);
			}
			break;
		case VCHIQ_MSG_CONNECT:
			vchiq_log_info(vchiq_core_log_level,
				"%d: prs CONNECT@%pK", state->id, header);
			/* Record the version common to both sides. */
			state->version_common = ((struct vchiq_slot_zero *)
				state->slot_data)->version;
			complete(&state->connect);
			break;
		case VCHIQ_MSG_BULK_RX:
		case VCHIQ_MSG_BULK_TX:
			/*
			 * We should never receive a bulk request from the
			 * other side since we're not setup to perform as the
			 * master.
			 */
			WARN_ON(1);
			break;
		case VCHIQ_MSG_BULK_RX_DONE:
		case VCHIQ_MSG_BULK_TX_DONE:
			if ((service->remoteport == remoteport)
				&& (service->srvstate !=
				VCHIQ_SRVSTATE_FREE)) {
				struct vchiq_bulk_queue *queue;
				struct vchiq_bulk *bulk;

				queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
					&service->bulk_rx : &service->bulk_tx;

				DEBUG_TRACE(PARSE_LINE);
				if (mutex_lock_killable(&service->bulk_mutex)) {
					DEBUG_TRACE(PARSE_LINE);
					goto bail_not_ready;
				}
				/* A DONE with no outstanding bulk is a
				** protocol error from the remote. */
				if ((int)(queue->remote_insert -
					queue->local_insert) >= 0) {
					vchiq_log_error(vchiq_core_log_level,
						"%d: prs %s@%pK (%d->%d) "
						"unexpected (ri=%d,li=%d)",
						state->id, msg_type_str(type),
						header, remoteport, localport,
						queue->remote_insert,
						queue->local_insert);
					mutex_unlock(&service->bulk_mutex);
					break;
				}
				if (queue->process != queue->remote_insert) {
					pr_err("%s: p %x != ri %x\n",
						__func__,
						queue->process,
						queue->remote_insert);
					mutex_unlock(&service->bulk_mutex);
					goto bail_not_ready;
				}

				bulk = &queue->bulks[
					BULK_INDEX(queue->remote_insert)];
				bulk->actual = *(int *)header->data;
				queue->remote_insert++;

				vchiq_log_info(vchiq_core_log_level,
					"%d: prs %s@%pK (%d->%d) %x@%pK",
					state->id, msg_type_str(type),
					header, remoteport, localport,
					bulk->actual, bulk->data);

				vchiq_log_trace(vchiq_core_log_level,
					"%d: prs:%d %cx li=%x ri=%x p=%x",
					state->id, localport,
					(type == VCHIQ_MSG_BULK_RX_DONE) ?
					'r' : 't',
					queue->local_insert,
					queue->remote_insert, queue->process);

				DEBUG_TRACE(PARSE_LINE);
				WARN_ON(queue->process == queue->local_insert);
				vchiq_complete_bulk(bulk);
				queue->process++;
				mutex_unlock(&service->bulk_mutex);
				DEBUG_TRACE(PARSE_LINE);
				notify_bulks(service, queue, 1/*retry_poll*/);
				DEBUG_TRACE(PARSE_LINE);
			}
			break;
		case VCHIQ_MSG_PADDING:
			vchiq_log_trace(vchiq_core_log_level,
				"%d: prs PADDING@%pK,%x",
				state->id, header, size);
			break;
		case VCHIQ_MSG_PAUSE:
			/* If initiated, signal the application thread */
			vchiq_log_trace(vchiq_core_log_level,
				"%d: prs PAUSE@%pK,%x",
				state->id, header, size);
			if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
				vchiq_log_error(vchiq_core_log_level,
					"%d: PAUSE received in state PAUSED",
					state->id);
				break;
			}
			if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
				/* Send a PAUSE in response */
				if (queue_message(state, NULL,
					VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
					NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
					== VCHIQ_RETRY)
					goto bail_not_ready;
			}
			/* At this point slot_mutex is held */
			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
			break;
		case VCHIQ_MSG_RESUME:
			vchiq_log_trace(vchiq_core_log_level,
				"%d: prs RESUME@%pK,%x",
				state->id, header, size);
			/* Release the slot mutex */
			mutex_unlock(&state->slot_mutex);
			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
			break;

		case VCHIQ_MSG_REMOTE_USE:
			vchiq_on_remote_use(state);
			break;
		case VCHIQ_MSG_REMOTE_RELEASE:
			vchiq_on_remote_release(state);
			break;
		case VCHIQ_MSG_REMOTE_USE_ACTIVE:
			break;

		default:
			vchiq_log_error(vchiq_core_log_level,
				"%d: prs invalid msgid %x@%pK,%x",
				state->id, msgid, header, size);
			WARN(1, "invalid message\n");
			break;
		}

skip_message:
		if (service) {
			unlock_service(service);
			service = NULL;
		}

		state->rx_pos += calc_stride(size);

		DEBUG_TRACE(PARSE_LINE);
		/* Perform some housekeeping when the end of the slot is
		** reached. */
		if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
			/* Remove the extra reference count. */
			release_slot(state, state->rx_info, NULL, NULL);
			state->rx_data = NULL;
		}
	}

bail_not_ready:
	if (service)
		unlock_service(service);
}

/*
 * Main message-pump thread: waits for the remote trigger, handles any
 * pending polls/state transitions, then parses newly arrived slots.
 * Called by the slot handler thread.
 */
static int
slot_handler_func(void *v)
{
	struct vchiq_state *state = v;
	struct vchiq_shared_state *local = state->local;

	DEBUG_INITIALISE(local)

	while (1) {
		DEBUG_COUNT(SLOT_HANDLER_COUNT);
		DEBUG_TRACE(SLOT_HANDLER_LINE);
		remote_event_wait(&state->trigger_event, &local->trigger);

		rmb();

		DEBUG_TRACE(SLOT_HANDLER_LINE);
		if (state->poll_needed) {

			state->poll_needed = 0;

			/* Handle service polling and other rare conditions here
			** out of the mainline code */
			switch (state->conn_state) {
			case VCHIQ_CONNSTATE_CONNECTED:
				/* Poll the services as requested */
				poll_services(state);
				break;

			case VCHIQ_CONNSTATE_PAUSING:
				if (queue_message(state, NULL,
					VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
					NULL, NULL, 0,
					QMFLAGS_NO_MUTEX_UNLOCK)
					!= VCHIQ_RETRY) {
					vchiq_set_conn_state(state,
						VCHIQ_CONNSTATE_PAUSE_SENT);
				} else {
					/* Retry later */
					state->poll_needed = 1;
				}
				break;

			case VCHIQ_CONNSTATE_RESUMING:
				if (queue_message(state, NULL,
					VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
					NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
					!= VCHIQ_RETRY) {
					vchiq_set_conn_state(state,
						VCHIQ_CONNSTATE_CONNECTED);
				} else {
					/* This should really be impossible,
					** since the PAUSE should have flushed
** through outstanding messages. */
					vchiq_log_error(vchiq_core_log_level,
						"Failed to send RESUME "
						"message");
				}
				break;
			default:
				break;
			}

		}

		DEBUG_TRACE(SLOT_HANDLER_LINE);
		parse_rx_slots(state);
	}
	return 0; /* unreachable - the thread loops forever */
}

/*
 * Thread entry for the recycle thread: waits for the remote to signal
 * that slots have been returned, then processes the free queue.
 * The `found` bitset is scratch space for process_free_queue(); it is
 * never freed because the thread runs for the lifetime of the state.
 * Called by the recycle thread.
 */
static int
recycle_func(void *v)
{
	struct vchiq_state *state = v;
	struct vchiq_shared_state *local = state->local;
	BITSET_T *found;
	size_t length;

	length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);

	found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
			      GFP_KERNEL);
	if (!found)
		return -ENOMEM;

	while (1) {
		remote_event_wait(&state->recycle_event, &local->recycle);

		process_free_queue(state, found, length);
	}
	return 0; /* unreachable - the thread loops forever */
}

/*
 * Thread entry for the synchronous-message thread. The remote's sync
 * slot holds one message at a time; each is dispatched to its service
 * and then released back so the remote can send the next.
 * Called by the sync thread.
 */
static int
sync_func(void *v)
{
	struct vchiq_state *state = v;
	struct vchiq_shared_state *local = state->local;
	struct vchiq_header *header =
		(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
			state->remote->slot_sync);

	while (1) {
		struct vchiq_service *service;
		int msgid, size;
		int type;
		unsigned int localport, remoteport;

		remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);

		rmb();

		msgid = header->msgid;
		size = header->size;
		type = VCHIQ_MSG_TYPE(msgid);
		localport = VCHIQ_MSG_DSTPORT(msgid);
		remoteport = VCHIQ_MSG_SRCPORT(msgid);

		service = find_service_by_port(state, localport);

		if (!service) {
			vchiq_log_error(vchiq_sync_log_level,
				"%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
				state->id, msg_type_str(type),
				header, remoteport, localport, localport);
			release_message_sync(state, header);
			continue;
		}

		if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
			int svc_fourcc;

			svc_fourcc = service
				? service->base.fourcc
				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
			vchiq_log_trace(vchiq_sync_log_level,
				"Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
				msg_type_str(type),
				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
				remoteport, localport, size);
			if (size > 0)
				vchiq_log_dump_mem("Rcvd", 0, header->data,
					min(16, size));
		}

		switch (type) {
		case VCHIQ_MSG_OPENACK:
			if (size >= sizeof(struct vchiq_openack_payload)) {
				const struct vchiq_openack_payload *payload =
					(struct vchiq_openack_payload *)
					header->data;
				service->peer_version = payload->version;
			}
			vchiq_log_info(vchiq_sync_log_level,
				"%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
				state->id, header, size, remoteport, localport,
				service->peer_version);
			if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
				service->remoteport = remoteport;
				vchiq_set_service_state(service,
					VCHIQ_SRVSTATE_OPENSYNC);
				service->sync = 1;
				complete(&service->remove_event);
			}
			release_message_sync(state, header);
			break;

		case VCHIQ_MSG_DATA:
			vchiq_log_trace(vchiq_sync_log_level,
				"%d: sf DATA@%pK,%x (%d->%d)",
				state->id, header, size, remoteport, localport);

			/* NOTE(review): if the port/state check below fails,
			** the message is never released, which would leave
			** the sync slot occupied - confirm whether a
			** release_message_sync() is needed on that path. */
			if ((service->remoteport == remoteport) &&
				(service->srvstate ==
				VCHIQ_SRVSTATE_OPENSYNC)) {
				if (make_service_callback(service,
					VCHIQ_MESSAGE_AVAILABLE, header,
					NULL) == VCHIQ_RETRY)
					vchiq_log_error(vchiq_sync_log_level,
						"synchronous callback to "
						"service %d returns "
						"VCHIQ_RETRY",
						localport);
			}
			break;

		default:
			vchiq_log_error(vchiq_sync_log_level,
				"%d: sf unexpected msgid %x@%pK,%x",
				state->id, msgid, header, size);
			release_message_sync(state, header);
			break;
		}

		unlock_service(service);
	}

	return 0; /* unreachable - the thread loops forever */
}

static void
init_bulk_queue(struct vchiq_bulk_queue *queue)
{
	/* Reset all queue cursors; the queue starts empty. */
	queue->local_insert = 0;
	queue->remote_insert = 0;
	queue->process = 0;
	queue->remote_notify = 0;
	queue->remove = 0;
}

/* Map a connection state to its printable name. */
inline const char *
get_conn_state_name(enum vchiq_connstate conn_state)
{
	return conn_state_names[conn_state];
}

/*
 * Lay out the shared memory region: align to a slot boundary, place
 * slot zero (the control structure) at the start, and split the
 * remaining data slots evenly between master and slave.
 * Returns NULL if the region is too small for a minimal system.
 */
struct vchiq_slot_zero *
vchiq_init_slots(void *mem_base, int mem_size)
{
	int mem_align =
		(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
	struct vchiq_slot_zero *slot_zero =
		(struct vchiq_slot_zero *)(mem_base + mem_align);
	int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
	int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;

	/* Ensure there is enough memory to run an absolutely minimum system */
	num_slots -= first_data_slot;

	if (num_slots < 4) {
		vchiq_log_error(vchiq_core_log_level,
			"%s - insufficient memory %x bytes",
			__func__, mem_size);
		return NULL;
	}

	memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));

	slot_zero->magic = VCHIQ_MAGIC;
	slot_zero->version = VCHIQ_VERSION;
	slot_zero->version_min = VCHIQ_VERSION_MIN;
	slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
	slot_zero->slot_size = VCHIQ_SLOT_SIZE;
	slot_zero->max_slots = VCHIQ_MAX_SLOTS;
	slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;

	/* Each side gets a sync slot followed by its share of data slots. */
	slot_zero->master.slot_sync = first_data_slot;
	slot_zero->master.slot_first = first_data_slot + 1;
	slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
	slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
	slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
	slot_zero->slave.slot_last = first_data_slot + num_slots - 1;

	return slot_zero;
}

/*
 * Initialise a vchiq_state against an already laid-out slot_zero and
 * start the slot-handler, recycle and sync threads.
 */
enum vchiq_status
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
{
	struct
vchiq_shared_state *local;
	struct vchiq_shared_state *remote;
	enum vchiq_status status;
	char threadname[16];
	int i;

	/* Only a single state (instance) is supported. */
	if (vchiq_states[0]) {
		pr_err("%s: VCHIQ state already initialized\n", __func__);
		return VCHIQ_ERROR;
	}

	/* This side is always the slave; the remote is the master. */
	local = &slot_zero->slave;
	remote = &slot_zero->master;

	if (local->initialised) {
		vchiq_loud_error_header();
		if (remote->initialised)
			vchiq_loud_error("local state has already been "
				"initialised");
		else
			vchiq_loud_error("master/slave mismatch two slaves");
		vchiq_loud_error_footer();
		return VCHIQ_ERROR;
	}

	memset(state, 0, sizeof(struct vchiq_state));

	/*
	   initialize shared state pointers
	 */

	state->local = local;
	state->remote = remote;
	state->slot_data = (struct vchiq_slot *)slot_zero;

	/*
	   initialize events and mutexes
	 */

	init_completion(&state->connect);
	mutex_init(&state->mutex);
	mutex_init(&state->slot_mutex);
	mutex_init(&state->recycle_mutex);
	mutex_init(&state->sync_mutex);
	mutex_init(&state->bulk_transfer_mutex);

	init_completion(&state->slot_available_event);
	init_completion(&state->slot_remove_event);
	init_completion(&state->data_quota_event);

	state->slot_queue_available = 0;

	for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
		struct vchiq_service_quota *service_quota =
			&state->service_quotas[i];
		init_completion(&service_quota->quota_event);
	}

	/* Make all of this side's data slots available. */
	for (i = local->slot_first; i <= local->slot_last; i++) {
		local->slot_queue[state->slot_queue_available++] = i;
		complete(&state->slot_available_event);
	}

	state->default_slot_quota = state->slot_queue_available/2;
	/* NOTE(review): the product is truncated to unsigned short before
	** the min(), so min() never clamps - presumably intentional
	** saturation to the unsigned short range; confirm. */
	state->default_message_quota =
		min((unsigned short)(state->default_slot_quota * 256),
			(unsigned short)~0);

	state->previous_data_index = -1;
	state->data_use_count = 0;
	state->data_quota = state->slot_queue_available - 1;

	remote_event_create(&state->trigger_event, &local->trigger);
	local->tx_pos = 0;
	remote_event_create(&state->recycle_event, &local->recycle);
	local->slot_queue_recycle = state->slot_queue_available;
	remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
	remote_event_create(&state->sync_release_event, &local->sync_release);

	/* At start-of-day, the slot is empty and available */
	((struct vchiq_header *)
		SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
		VCHIQ_MSGID_PADDING;
	remote_event_signal_local(&state->sync_release_event, &local->sync_release);

	local->debug[DEBUG_ENTRIES] = DEBUG_MAX;

	status = vchiq_platform_init_state(state);
	if (status != VCHIQ_SUCCESS)
		return VCHIQ_ERROR;

	/*
	   bring up slot handler thread
	 */
	snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
	state->slot_handler_thread = kthread_create(&slot_handler_func,
		(void *)state,
		threadname);

	if (IS_ERR(state->slot_handler_thread)) {
		vchiq_loud_error_header();
		vchiq_loud_error("couldn't create thread %s", threadname);
		vchiq_loud_error_footer();
		return VCHIQ_ERROR;
	}
	set_user_nice(state->slot_handler_thread, -19);

	snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
	state->recycle_thread = kthread_create(&recycle_func,
		(void *)state,
		threadname);
	if (IS_ERR(state->recycle_thread)) {
		vchiq_loud_error_header();
		vchiq_loud_error("couldn't create thread %s", threadname);
		vchiq_loud_error_footer();
		goto fail_free_handler_thread;
	}
	set_user_nice(state->recycle_thread, -19);

	snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
	state->sync_thread = kthread_create(&sync_func,
		(void *)state,
		threadname);
	if (IS_ERR(state->sync_thread)) {
		vchiq_loud_error_header();
		vchiq_loud_error("couldn't create thread %s", threadname);
		vchiq_loud_error_footer();
		goto fail_free_recycle_thread;
	}
	set_user_nice(state->sync_thread, -20);

	/* All three threads created successfully - start them running. */
	wake_up_process(state->slot_handler_thread);
	wake_up_process(state->recycle_thread);
	wake_up_process(state->sync_thread);

	vchiq_states[0] = state;

	/* Indicate readiness to the other side */
	local->initialised = 1;

	return status;

fail_free_recycle_thread:
	kthread_stop(state->recycle_thread);
fail_free_handler_thread:
	kthread_stop(state->slot_handler_thread);

	return VCHIQ_ERROR;
}

/* Called from application thread when a client or server service is created. */
struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
	const struct vchiq_service_params *params,
	int srvstate, struct vchiq_instance *instance,
	vchiq_userdata_term userdata_term)
{
	struct vchiq_service *service;
	struct vchiq_service __rcu **pservice = NULL;
	struct vchiq_service_quota *service_quota;
	int i;

	service = kmalloc(sizeof(*service), GFP_KERNEL);
	if (!service)
		return service;

	service->base.fourcc = params->fourcc;
	service->base.callback = params->callback;
	service->base.userdata = params->userdata;
	service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
	kref_init(&service->ref_count);
	service->srvstate = VCHIQ_SRVSTATE_FREE;
	service->userdata_term = userdata_term;
	service->localport = VCHIQ_PORT_FREE;
	service->remoteport = VCHIQ_PORT_FREE;

	/* Clients keep their fourcc private so they are never matched by
	** an incoming OPEN. */
	service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2295 VCHIQ_FOURCC_INVALID : params->fourcc; 2296 service->client_id = 0; 2297 service->auto_close = 1; 2298 service->sync = 0; 2299 service->closing = 0; 2300 service->trace = 0; 2301 atomic_set(&service->poll_flags, 0); 2302 service->version = params->version; 2303 service->version_min = params->version_min; 2304 service->state = state; 2305 service->instance = instance; 2306 service->service_use_count = 0; 2307 init_bulk_queue(&service->bulk_tx); 2308 init_bulk_queue(&service->bulk_rx); 2309 init_completion(&service->remove_event); 2310 init_completion(&service->bulk_remove_event); 2311 mutex_init(&service->bulk_mutex); 2312 memset(&service->stats, 0, sizeof(service->stats)); 2313 2314 /* Although it is perfectly possible to use a spinlock 2315 ** to protect the creation of services, it is overkill as it 2316 ** disables interrupts while the array is searched. 2317 ** The only danger is of another thread trying to create a 2318 ** service - service deletion is safe. 2319 ** Therefore it is preferable to use state->mutex which, 2320 ** although slower to claim, doesn't block interrupts while 2321 ** it is held. 
2322 */ 2323 2324 mutex_lock(&state->mutex); 2325 2326 /* Prepare to use a previously unused service */ 2327 if (state->unused_service < VCHIQ_MAX_SERVICES) 2328 pservice = &state->services[state->unused_service]; 2329 2330 if (srvstate == VCHIQ_SRVSTATE_OPENING) { 2331 for (i = 0; i < state->unused_service; i++) { 2332 if (!rcu_access_pointer(state->services[i])) { 2333 pservice = &state->services[i]; 2334 break; 2335 } 2336 } 2337 } else { 2338 rcu_read_lock(); 2339 for (i = (state->unused_service - 1); i >= 0; i--) { 2340 struct vchiq_service *srv; 2341 2342 srv = rcu_dereference(state->services[i]); 2343 if (!srv) 2344 pservice = &state->services[i]; 2345 else if ((srv->public_fourcc == params->fourcc) 2346 && ((srv->instance != instance) || 2347 (srv->base.callback != 2348 params->callback))) { 2349 /* There is another server using this 2350 ** fourcc which doesn't match. */ 2351 pservice = NULL; 2352 break; 2353 } 2354 } 2355 rcu_read_unlock(); 2356 } 2357 2358 if (pservice) { 2359 service->localport = (pservice - state->services); 2360 if (!handle_seq) 2361 handle_seq = VCHIQ_MAX_STATES * 2362 VCHIQ_MAX_SERVICES; 2363 service->handle = handle_seq | 2364 (state->id * VCHIQ_MAX_SERVICES) | 2365 service->localport; 2366 handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES; 2367 rcu_assign_pointer(*pservice, service); 2368 if (pservice == &state->services[state->unused_service]) 2369 state->unused_service++; 2370 } 2371 2372 mutex_unlock(&state->mutex); 2373 2374 if (!pservice) { 2375 kfree(service); 2376 return NULL; 2377 } 2378 2379 service_quota = &state->service_quotas[service->localport]; 2380 service_quota->slot_quota = state->default_slot_quota; 2381 service_quota->message_quota = state->default_message_quota; 2382 if (service_quota->slot_use_count == 0) 2383 service_quota->previous_tx_index = 2384 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos) 2385 - 1; 2386 2387 /* Bring this service online */ 2388 vchiq_set_service_state(service, srvstate); 2389 2390 
vchiq_log_info(vchiq_core_msg_log_level, 2391 "%s Service %c%c%c%c SrcPort:%d", 2392 (srvstate == VCHIQ_SRVSTATE_OPENING) 2393 ? "Open" : "Add", 2394 VCHIQ_FOURCC_AS_4CHARS(params->fourcc), 2395 service->localport); 2396 2397 /* Don't unlock the service - leave it with a ref_count of 1. */ 2398 2399 return service; 2400 } 2401 2402 enum vchiq_status 2403 vchiq_open_service_internal(struct vchiq_service *service, int client_id) 2404 { 2405 struct vchiq_open_payload payload = { 2406 service->base.fourcc, 2407 client_id, 2408 service->version, 2409 service->version_min 2410 }; 2411 enum vchiq_status status = VCHIQ_SUCCESS; 2412 2413 service->client_id = client_id; 2414 vchiq_use_service_internal(service); 2415 status = queue_message(service->state, 2416 NULL, 2417 VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, 2418 service->localport, 2419 0), 2420 memcpy_copy_callback, 2421 &payload, 2422 sizeof(payload), 2423 QMFLAGS_IS_BLOCKING); 2424 if (status == VCHIQ_SUCCESS) { 2425 /* Wait for the ACK/NAK */ 2426 if (wait_for_completion_interruptible(&service->remove_event)) { 2427 status = VCHIQ_RETRY; 2428 vchiq_release_service_internal(service); 2429 } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) && 2430 (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) { 2431 if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) 2432 vchiq_log_error(vchiq_core_log_level, 2433 "%d: osi - srvstate = %s (ref %u)", 2434 service->state->id, 2435 srvstate_names[service->srvstate], 2436 kref_read(&service->ref_count)); 2437 status = VCHIQ_ERROR; 2438 VCHIQ_SERVICE_STATS_INC(service, error_count); 2439 vchiq_release_service_internal(service); 2440 } 2441 } 2442 return status; 2443 } 2444 2445 static void 2446 release_service_messages(struct vchiq_service *service) 2447 { 2448 struct vchiq_state *state = service->state; 2449 int slot_last = state->remote->slot_last; 2450 int i; 2451 2452 /* Release any claimed messages aimed at this service */ 2453 2454 if (service->sync) { 2455 struct vchiq_header *header = 
2456 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state, 2457 state->remote->slot_sync); 2458 if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport) 2459 release_message_sync(state, header); 2460 2461 return; 2462 } 2463 2464 for (i = state->remote->slot_first; i <= slot_last; i++) { 2465 struct vchiq_slot_info *slot_info = 2466 SLOT_INFO_FROM_INDEX(state, i); 2467 if (slot_info->release_count != slot_info->use_count) { 2468 char *data = 2469 (char *)SLOT_DATA_FROM_INDEX(state, i); 2470 unsigned int pos, end; 2471 2472 end = VCHIQ_SLOT_SIZE; 2473 if (data == state->rx_data) 2474 /* This buffer is still being read from - stop 2475 ** at the current read position */ 2476 end = state->rx_pos & VCHIQ_SLOT_MASK; 2477 2478 pos = 0; 2479 2480 while (pos < end) { 2481 struct vchiq_header *header = 2482 (struct vchiq_header *)(data + pos); 2483 int msgid = header->msgid; 2484 int port = VCHIQ_MSG_DSTPORT(msgid); 2485 2486 if ((port == service->localport) && 2487 (msgid & VCHIQ_MSGID_CLAIMED)) { 2488 vchiq_log_info(vchiq_core_log_level, 2489 " fsi - hdr %pK", header); 2490 release_slot(state, slot_info, header, 2491 NULL); 2492 } 2493 pos += calc_stride(header->size); 2494 if (pos > VCHIQ_SLOT_SIZE) { 2495 vchiq_log_error(vchiq_core_log_level, 2496 "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x", 2497 pos, header, msgid, 2498 header->msgid, header->size); 2499 WARN(1, "invalid slot position\n"); 2500 } 2501 } 2502 } 2503 } 2504 } 2505 2506 static int 2507 do_abort_bulks(struct vchiq_service *service) 2508 { 2509 enum vchiq_status status; 2510 2511 /* Abort any outstanding bulk transfers */ 2512 if (mutex_lock_killable(&service->bulk_mutex)) 2513 return 0; 2514 abort_outstanding_bulks(service, &service->bulk_tx); 2515 abort_outstanding_bulks(service, &service->bulk_rx); 2516 mutex_unlock(&service->bulk_mutex); 2517 2518 status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/); 2519 if (status == VCHIQ_SUCCESS) 2520 status = 
notify_bulks(service, &service->bulk_rx, 2521 0/*!retry_poll*/); 2522 return (status == VCHIQ_SUCCESS); 2523 } 2524 2525 static enum vchiq_status 2526 close_service_complete(struct vchiq_service *service, int failstate) 2527 { 2528 enum vchiq_status status; 2529 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID); 2530 int newstate; 2531 2532 switch (service->srvstate) { 2533 case VCHIQ_SRVSTATE_OPEN: 2534 case VCHIQ_SRVSTATE_CLOSESENT: 2535 case VCHIQ_SRVSTATE_CLOSERECVD: 2536 if (is_server) { 2537 if (service->auto_close) { 2538 service->client_id = 0; 2539 service->remoteport = VCHIQ_PORT_FREE; 2540 newstate = VCHIQ_SRVSTATE_LISTENING; 2541 } else 2542 newstate = VCHIQ_SRVSTATE_CLOSEWAIT; 2543 } else 2544 newstate = VCHIQ_SRVSTATE_CLOSED; 2545 vchiq_set_service_state(service, newstate); 2546 break; 2547 case VCHIQ_SRVSTATE_LISTENING: 2548 break; 2549 default: 2550 vchiq_log_error(vchiq_core_log_level, 2551 "%s(%x) called in state %s", __func__, 2552 service->handle, srvstate_names[service->srvstate]); 2553 WARN(1, "%s in unexpected state\n", __func__); 2554 return VCHIQ_ERROR; 2555 } 2556 2557 status = make_service_callback(service, 2558 VCHIQ_SERVICE_CLOSED, NULL, NULL); 2559 2560 if (status != VCHIQ_RETRY) { 2561 int uc = service->service_use_count; 2562 int i; 2563 /* Complete the close process */ 2564 for (i = 0; i < uc; i++) 2565 /* cater for cases where close is forced and the 2566 ** client may not close all it's handles */ 2567 vchiq_release_service_internal(service); 2568 2569 service->client_id = 0; 2570 service->remoteport = VCHIQ_PORT_FREE; 2571 2572 if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) 2573 vchiq_free_service_internal(service); 2574 else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) { 2575 if (is_server) 2576 service->closing = 0; 2577 2578 complete(&service->remove_event); 2579 } 2580 } else 2581 vchiq_set_service_state(service, failstate); 2582 2583 return status; 2584 } 2585 2586 /* Called by the slot handler */ 2587 
/*
 * Drive the close state machine for @service.  close_recvd is non-zero when
 * the peer has already sent us a CLOSE for this service; otherwise the close
 * originates locally.  May return VCHIQ_RETRY, in which case the slot
 * handler re-runs this step later.  Note: queue_message() with
 * QMFLAGS_NO_MUTEX_UNLOCK leaves state->slot_mutex held on success, which is
 * why the OPEN/OPENSYNC branch unlocks it explicitly after changing state.
 */
enum vchiq_status
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
{
	struct vchiq_state *state = service->state;
	enum vchiq_status status = VCHIQ_SUCCESS;
	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);

	vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
		service->state->id, service->localport, close_recvd,
		srvstate_names[service->srvstate]);

	switch (service->srvstate) {
	case VCHIQ_SRVSTATE_CLOSED:
	case VCHIQ_SRVSTATE_HIDDEN:
	case VCHIQ_SRVSTATE_LISTENING:
	case VCHIQ_SRVSTATE_CLOSEWAIT:
		if (close_recvd)
			/* A CLOSE from the peer makes no sense here. */
			vchiq_log_error(vchiq_core_log_level,
				"%s(1) called "
				"in state %s",
				__func__, srvstate_names[service->srvstate]);
		else if (is_server) {
			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
				status = VCHIQ_ERROR;
			} else {
				service->client_id = 0;
				service->remoteport = VCHIQ_PORT_FREE;
				if (service->srvstate ==
					VCHIQ_SRVSTATE_CLOSEWAIT)
					vchiq_set_service_state(service,
						VCHIQ_SRVSTATE_LISTENING);
			}
			complete(&service->remove_event);
		} else
			vchiq_free_service_internal(service);
		break;
	case VCHIQ_SRVSTATE_OPENING:
		if (close_recvd) {
			/* The open was rejected - tell the user */
			vchiq_set_service_state(service,
				VCHIQ_SRVSTATE_CLOSEWAIT);
			complete(&service->remove_event);
		} else {
			/* Shutdown mid-open - let the other side know */
			status = queue_message(state, service,
				VCHIQ_MAKE_MSG
				(VCHIQ_MSG_CLOSE,
				service->localport,
				VCHIQ_MSG_DSTPORT(service->remoteport)),
				NULL, NULL, 0, 0);
		}
		break;

	case VCHIQ_SRVSTATE_OPENSYNC:
		mutex_lock(&state->sync_mutex);
		/* fall through */
	case VCHIQ_SRVSTATE_OPEN:
		if (close_recvd) {
			if (!do_abort_bulks(service))
				status = VCHIQ_RETRY;
		}

		release_service_messages(service);

		if (status == VCHIQ_SUCCESS)
			status = queue_message(state, service,
				VCHIQ_MAKE_MSG
				(VCHIQ_MSG_CLOSE,
				service->localport,
				VCHIQ_MSG_DSTPORT(service->remoteport)),
				NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);

		if (status == VCHIQ_SUCCESS) {
			if (!close_recvd) {
				/* Change the state while the mutex is
				   still held */
				vchiq_set_service_state(service,
					VCHIQ_SRVSTATE_CLOSESENT);
				mutex_unlock(&state->slot_mutex);
				if (service->sync)
					mutex_unlock(&state->sync_mutex);
				break;
			}
		} else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
			mutex_unlock(&state->sync_mutex);
			break;
		} else
			break;

		/* Change the state while the mutex is still held */
		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
		mutex_unlock(&state->slot_mutex);
		if (service->sync)
			mutex_unlock(&state->sync_mutex);

		status = close_service_complete(service,
			VCHIQ_SRVSTATE_CLOSERECVD);
		break;

	case VCHIQ_SRVSTATE_CLOSESENT:
		if (!close_recvd)
			/* This happens when a process is killed mid-close */
			break;

		if (!do_abort_bulks(service)) {
			status = VCHIQ_RETRY;
			break;
		}

		/* NOTE(review): status is necessarily VCHIQ_SUCCESS here
		   (set above and not modified on this path), so this check
		   is redundant but harmless. */
		if (status == VCHIQ_SUCCESS)
			status = close_service_complete(service,
				VCHIQ_SRVSTATE_CLOSERECVD);
		break;

	case VCHIQ_SRVSTATE_CLOSERECVD:
		if (!close_recvd && is_server)
			/* Force into LISTENING mode */
			vchiq_set_service_state(service,
				VCHIQ_SRVSTATE_LISTENING);
		status = close_service_complete(service,
			VCHIQ_SRVSTATE_CLOSERECVD);
		break;

	default:
		vchiq_log_error(vchiq_core_log_level,
			"%s(%d) called in state %s", __func__,
			close_recvd, srvstate_names[service->srvstate]);
		break;
	}

	return status;
}

/* Called from the application process upon process death */
void
vchiq_terminate_service_internal(struct vchiq_service *service)
{
	struct vchiq_state *state = service->state;

	vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
		state->id, service->localport, service->remoteport);

	mark_service_closing(service);

	/* Mark the service for removal by the slot handler */
	request_poll(state, service, VCHIQ_POLL_REMOVE);
}

/*
 * Called from the slot handler.
 * Moves a quiescent service to FREE and drops the initial reference taken
 * at creation.  Only legal from states where no traffic is in flight; any
 * other state is logged and the call is a no-op.
 */
void
vchiq_free_service_internal(struct vchiq_service *service)
{
	struct vchiq_state *state = service->state;

	vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
		state->id, service->localport);

	switch (service->srvstate) {
	case VCHIQ_SRVSTATE_OPENING:
	case VCHIQ_SRVSTATE_CLOSED:
	case VCHIQ_SRVSTATE_HIDDEN:
	case VCHIQ_SRVSTATE_LISTENING:
	case VCHIQ_SRVSTATE_CLOSEWAIT:
		break;
	default:
		vchiq_log_error(vchiq_core_log_level,
			"%d: fsi - (%d) in state %s",
			state->id, service->localport,
			srvstate_names[service->srvstate]);
		return;
	}

	vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);

	complete(&service->remove_event);

	/* Release the initial lock */
	unlock_service(service);
}

/*
 * Connect this instance to the peer: move its HIDDEN services to
 * LISTENING, send CONNECT once (DISCONNECTED -> CONNECTING), then wait for
 * the peer's CONNECT before declaring CONNECTED.  The completion is
 * re-signalled so concurrent connectors also wake.  Returns VCHIQ_RETRY if
 * interrupted by a signal.
 */
enum vchiq_status
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
	struct vchiq_service *service;
	int i;

	/* Find all services registered to this client and enable them. */
	i = 0;
	while ((service = next_service_by_instance(state, instance,
		&i)) != NULL) {
		if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
			vchiq_set_service_state(service,
				VCHIQ_SRVSTATE_LISTENING);
		unlock_service(service);
	}

	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
		if (queue_message(state, NULL,
			VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
			0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
			return VCHIQ_RETRY;

		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
	}

	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
		if (wait_for_completion_interruptible(&state->connect))
			return VCHIQ_RETRY;

		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
		complete(&state->connect);
	}

	return VCHIQ_SUCCESS;
}

/*
 * Tear down this instance: remove every service it registered.
 */
enum vchiq_status
vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
	struct vchiq_service *service;
	int i;

	/* Find all services registered to this client and remove them. */
	i = 0;
	while ((service = next_service_by_instance(state, instance,
		&i)) != NULL) {
		(void)vchiq_remove_service(service->handle);
		unlock_service(service);
	}

	return VCHIQ_SUCCESS;
}

/*
 * Public close entry point.  If called on the slot handler thread the
 * close runs synchronously; otherwise the service is flagged for the slot
 * handler and this thread waits on remove_event until the service reaches
 * a terminal state (FREE/LISTENING/OPEN).  Returns VCHIQ_RETRY on signal.
 */
enum vchiq_status
vchiq_close_service(unsigned int handle)
{
	/* Unregister the service */
	struct vchiq_service *service = find_service_by_handle(handle);
	enum vchiq_status status = VCHIQ_SUCCESS;

	if (!service)
		return VCHIQ_ERROR;

	vchiq_log_info(vchiq_core_log_level,
		"%d: close_service:%d",
		service->state->id, service->localport);

	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
	    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
	    (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
		unlock_service(service);
		return VCHIQ_ERROR;
	}

	mark_service_closing(service);

	if (current == service->state->slot_handler_thread) {
		status = vchiq_close_service_internal(service,
			0/*!close_recvd*/);
		WARN_ON(status == VCHIQ_RETRY);
	} else {
		/* Mark the service for termination by the slot handler */
		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
	}

	while (1) {
		if (wait_for_completion_interruptible(&service->remove_event)) {
			status = VCHIQ_RETRY;
			break;
		}

		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
			break;

		vchiq_log_warning(vchiq_core_log_level,
			"%d: close_service:%d - waiting in state %s",
			service->state->id, service->localport,
			srvstate_names[service->srvstate]);
	}

	if ((status == VCHIQ_SUCCESS) &&
	    (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
	    (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
		status = VCHIQ_ERROR;

	unlock_service(service);

	return status;
}

/*
 * Like vchiq_close_service() but the service must end up fully removed,
 * never parked in LISTENING - so a server is temporarily disguised as a
 * client by invalidating its public fourcc.
 */
enum vchiq_status
vchiq_remove_service(unsigned int handle)
{
	/* Unregister the service */
	struct vchiq_service *service = find_service_by_handle(handle);
	enum vchiq_status status = VCHIQ_SUCCESS;

	if (!service)
		return VCHIQ_ERROR;

	vchiq_log_info(vchiq_core_log_level,
		"%d: remove_service:%d",
		service->state->id, service->localport);

	if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
		unlock_service(service);
		return VCHIQ_ERROR;
	}

	mark_service_closing(service);

	if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
	    (current == service->state->slot_handler_thread)) {
		/* Make it look like a client, because it must be removed and
		   not left in the LISTENING state. */
		service->public_fourcc = VCHIQ_FOURCC_INVALID;

		status = vchiq_close_service_internal(service,
			0/*!close_recvd*/);
		WARN_ON(status == VCHIQ_RETRY);
	} else {
		/* Mark the service for removal by the slot handler */
		request_poll(service->state, service, VCHIQ_POLL_REMOVE);
	}
	while (1) {
		if (wait_for_completion_interruptible(&service->remove_event)) {
			status = VCHIQ_RETRY;
			break;
		}

		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
			break;

		vchiq_log_warning(vchiq_core_log_level,
			"%d: remove_service:%d - waiting in state %s",
			service->state->id, service->localport,
			srvstate_names[service->srvstate]);
	}

	if ((status == VCHIQ_SUCCESS) &&
	    (service->srvstate != VCHIQ_SRVSTATE_FREE))
		status = VCHIQ_ERROR;

	unlock_service(service);

	return status;
}

/* This function may be called by kernel threads or user threads.
 * User threads may receive VCHIQ_RETRY to indicate that a signal has been
 * received and the call should be retried after being returned to user
 * context.
 * When called in blocking mode, the userdata field points to a bulk_waiter
 * structure.
 */
enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
				      void *offset, int size, void *userdata,
				      enum vchiq_bulk_mode mode,
				      enum vchiq_bulk_dir dir)
{
	struct vchiq_service *service = find_service_by_handle(handle);
	struct vchiq_bulk_queue *queue;
	struct vchiq_bulk *bulk;
	struct vchiq_state *state;
	struct bulk_waiter *bulk_waiter = NULL;
	const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
	const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
	enum vchiq_status status = VCHIQ_ERROR;
	int payload[2];

	/* NOTE(review): WAITING mode also passes this !offset check, so
	   callers resuming a wait are expected to supply the original
	   offset - confirm against callers. */
	if (!service || service->srvstate != VCHIQ_SRVSTATE_OPEN ||
	    !offset || vchiq_check_service(service) != VCHIQ_SUCCESS)
		goto error_exit;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		bulk_waiter = userdata;
		init_completion(&bulk_waiter->event);
		bulk_waiter->actual = 0;
		bulk_waiter->bulk = NULL;
		break;
	case VCHIQ_BULK_MODE_WAITING:
		/* Resume a previously-interrupted blocking transfer: the
		   bulk was already queued, just wait for it again. */
		bulk_waiter = userdata;
		bulk = bulk_waiter->bulk;
		goto waiting;
	default:
		goto error_exit;
	}

	state = service->state;

	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
		&service->bulk_tx : &service->bulk_rx;

	if (mutex_lock_killable(&service->bulk_mutex)) {
		status = VCHIQ_RETRY;
		goto error_exit;
	}

	/* Queue full: drop the mutex and wait for a slot to be freed. */
	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
		do {
			mutex_unlock(&service->bulk_mutex);
			if (wait_for_completion_interruptible(
						&service->bulk_remove_event)) {
				status = VCHIQ_RETRY;
				goto error_exit;
			}
			if (mutex_lock_killable(&service->bulk_mutex)) {
				status = VCHIQ_RETRY;
				goto error_exit;
			}
		} while (queue->local_insert == queue->remove +
				VCHIQ_NUM_SERVICE_BULKS);
	}

	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];

	bulk->mode = mode;
	bulk->dir = dir;
	bulk->userdata = userdata;
	bulk->size = size;
	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;

	if (vchiq_prepare_bulk_data(bulk, offset, size, dir) != VCHIQ_SUCCESS)
		goto unlock_error_exit;

	/* Ensure the bulk record is fully visible before the peer is told
	   about it via the message below. */
	wmb();

	vchiq_log_info(vchiq_core_log_level,
		"%d: bt (%d->%d) %cx %x@%pK %pK",
		state->id, service->localport, service->remoteport, dir_char,
		size, bulk->data, userdata);

	/* The slot mutex must be held when the service is being closed, so
	   claim it here to ensure that isn't happening */
	if (mutex_lock_killable(&state->slot_mutex)) {
		status = VCHIQ_RETRY;
		goto cancel_bulk_error_exit;
	}

	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
		goto unlock_both_error_exit;

	payload[0] = (int)(long)bulk->data;
	payload[1] = bulk->size;
	status = queue_message(state,
			       NULL,
			       VCHIQ_MAKE_MSG(dir_msgtype,
					      service->localport,
					      service->remoteport),
			       memcpy_copy_callback,
			       &payload,
			       sizeof(payload),
			       QMFLAGS_IS_BLOCKING |
			       QMFLAGS_NO_MUTEX_LOCK |
			       QMFLAGS_NO_MUTEX_UNLOCK);
	if (status != VCHIQ_SUCCESS)
		goto unlock_both_error_exit;

	queue->local_insert++;

	mutex_unlock(&state->slot_mutex);
	mutex_unlock(&service->bulk_mutex);

	vchiq_log_trace(vchiq_core_log_level,
		"%d: bt:%d %cx li=%x ri=%x p=%x",
		state->id,
		service->localport, dir_char,
		queue->local_insert, queue->remote_insert, queue->process);

waiting:
	unlock_service(service);

	status = VCHIQ_SUCCESS;

	if (bulk_waiter) {
		bulk_waiter->bulk = bulk;
		if (wait_for_completion_interruptible(&bulk_waiter->event))
			status = VCHIQ_RETRY;
		else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
			status = VCHIQ_ERROR;
	}

	return status;

unlock_both_error_exit:
	mutex_unlock(&state->slot_mutex);
cancel_bulk_error_exit:
	vchiq_complete_bulk(bulk);
unlock_error_exit:
	mutex_unlock(&service->bulk_mutex);

error_exit:
	if (service)
		unlock_service(service);
	return status;
}

/*
 * Queue a DATA message on @handle, pulling the payload through
 * copy_callback.  Routes to the synchronous path for OPENSYNC services.
 * Fails (VCHIQ_ERROR) for zero-sized or oversized messages and for
 * services in any other state.
 */
enum vchiq_status
vchiq_queue_message(unsigned int handle,
		    ssize_t (*copy_callback)(void *context, void *dest,
					     size_t offset, size_t maxsize),
		    void *context,
		    size_t size)
{
	struct vchiq_service *service = find_service_by_handle(handle);
	enum vchiq_status status = VCHIQ_ERROR;

	if (!service ||
	    (vchiq_check_service(service) != VCHIQ_SUCCESS))
		goto error_exit;

	if (!size) {
		VCHIQ_SERVICE_STATS_INC(service, error_count);
		goto error_exit;

	}

	if (size > VCHIQ_MAX_MSG_SIZE) {
		VCHIQ_SERVICE_STATS_INC(service, error_count);
		goto error_exit;
	}

	switch (service->srvstate) {
	case VCHIQ_SRVSTATE_OPEN:
		status = queue_message(service->state, service,
				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
					service->localport,
					service->remoteport),
				copy_callback, context, size, 1);
		break;
	case VCHIQ_SRVSTATE_OPENSYNC:
		status = queue_message_sync(service->state, service,
				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
					service->localport,
					service->remoteport),
				copy_callback, context, size, 1);
		break;
	default:
		status = VCHIQ_ERROR;
		break;
	}

error_exit:
	if (service)
		unlock_service(service);

	return status;
}

/* Convenience wrapper: queue an in-memory buffer as a DATA message. */
enum vchiq_status vchiq_queue_kernel_message(unsigned int handle, void *context,
					     size_t size)
{
	return vchiq_queue_message(handle, memcpy_copy_callback, context, size);
}

/*
 * Return a received message's slot space to the peer.  Claimed messages in
 * the peer's ordinary slot range are released via release_slot(); a message
 * in the sync slot is handed back through release_message_sync().
 */
void
vchiq_release_message(unsigned int handle,
		      struct vchiq_header *header)
{
	struct vchiq_service *service = find_service_by_handle(handle);
	struct vchiq_shared_state *remote;
	struct vchiq_state *state;
	int slot_index;

	if (!service)
		return;

	state = service->state;
	remote = state->remote;

	slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);

	if ((slot_index >= remote->slot_first) &&
	    (slot_index <= remote->slot_last)) {
		int msgid = header->msgid;

		if (msgid & VCHIQ_MSGID_CLAIMED) {
			struct vchiq_slot_info *slot_info =
				SLOT_INFO_FROM_INDEX(state, slot_index);

			release_slot(state, slot_info, header, service);
		}
	} else if (slot_index == remote->slot_sync)
		release_message_sync(state, header);

	unlock_service(service);
}

/* Mark the sync slot empty again and ring the peer's sync-release bell. */
static void
release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
{
	header->msgid = VCHIQ_MSGID_PADDING;
	remote_event_signal(&state->remote->sync_release);
}

/*
 * Report the version negotiated with the peer for @handle via
 * *peer_version.  Returns VCHIQ_ERROR for a bad handle, failed service
 * check, or NULL output pointer.
 */
enum vchiq_status
vchiq_get_peer_version(unsigned int handle, short *peer_version)
{
	enum vchiq_status status = VCHIQ_ERROR;
	struct vchiq_service *service = find_service_by_handle(handle);

	if (!service ||
	    (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
	    !peer_version)
		goto exit;
	*peer_version = service->peer_version;
	status = VCHIQ_SUCCESS;

exit:
	if (service)
		unlock_service(service);
	return status;
}

/* Fill @config with the compile-time limits of this VCHIQ build. */
void vchiq_get_config(struct vchiq_config *config)
{
	config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
	config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
	config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
	config->max_services = VCHIQ_MAX_SERVICES;
	config->version = VCHIQ_VERSION;
	config->version_min = VCHIQ_VERSION_MIN;
}

/*
 * Set a per-service tunable.  Quota updates only take effect when the new
 * value is not below the current use count; raising a quota wakes any
 * waiter that may now be under both quotas.  SYNCHRONOUS can only be
 * toggled before the service is opened (HIDDEN/LISTENING).
 */
enum vchiq_status
vchiq_set_service_option(unsigned int handle,
			 enum vchiq_service_option option, int value)
{
	struct vchiq_service *service = find_service_by_handle(handle);
	enum vchiq_status status = VCHIQ_ERROR;

	if (service) {
		switch (option) {
		case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
			service->auto_close = value;
			status = VCHIQ_SUCCESS;
			break;

		case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
			struct vchiq_service_quota *service_quota =
				&service->state->service_quotas[
					service->localport];
			if (value == 0)
				value = service->state->default_slot_quota;
			if ((value >= service_quota->slot_use_count) &&
			    (value < (unsigned short)~0)) {
				service_quota->slot_quota = value;
				if ((value >= service_quota->slot_use_count) &&
				    (service_quota->message_quota >=
				     service_quota->message_use_count)) {
					/* Signal the service that it may have
					** dropped below its quota */
					complete(&service_quota->quota_event);
				}
				status = VCHIQ_SUCCESS;
			}
		} break;

		case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
			struct vchiq_service_quota *service_quota =
				&service->state->service_quotas[
					service->localport];
			if (value == 0)
				value = service->state->default_message_quota;
			if ((value >= service_quota->message_use_count) &&
			    (value < (unsigned short)~0)) {
				service_quota->message_quota = value;
				if ((value >=
				     service_quota->message_use_count) &&
				    (service_quota->slot_quota >=
				     service_quota->slot_use_count))
					/* Signal the service that it may have
					** dropped below its quota */
					complete(&service_quota->quota_event);
				status = VCHIQ_SUCCESS;
			}
		} break;

		case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
			/* Only changeable before the service is opened. */
			if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
			    (service->srvstate ==
			     VCHIQ_SRVSTATE_LISTENING)) {
				service->sync = value;
				status = VCHIQ_SUCCESS;
			}
			break;

		case VCHIQ_SERVICE_OPTION_TRACE:
			service->trace = value;
			status = VCHIQ_SUCCESS;
			break;

		default:
			break;
		}
		unlock_service(service);
	}

	return status;
}

/*
 * Dump one side's shared state (@shared, labelled @label): slot range and
 * positions, any slots with outstanding claims, and the debug counters.
 * Returns 0 on success or the error from vchiq_dump().
 */
static int
vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
			struct vchiq_shared_state *shared, const char *label)
{
	static const char *const debug_names[] = {
		"<entries>",
		"SLOT_HANDLER_COUNT",
		"SLOT_HANDLER_LINE",
		"PARSE_LINE",
		"PARSE_HEADER",
		"PARSE_MSGID",
		"AWAIT_COMPLETION_LINE",
		"DEQUEUE_MESSAGE_LINE",
		"SERVICE_CALLBACK_LINE",
		"MSG_QUEUE_FULL_COUNT",
		"COMPLETION_QUEUE_FULL_COUNT"
	};
	int i;
	char buf[80];
	int len;
	int err;

	len = scnprintf(buf, sizeof(buf),
		"  %s: slots %d-%d tx_pos=%x recycle=%x",
		label, shared->slot_first, shared->slot_last,
		shared->tx_pos, shared->slot_queue_recycle);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	len = scnprintf(buf, sizeof(buf),
		"    Slots claimed:");
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	/* List only slots whose claims have not all been released yet. */
	for (i = shared->slot_first; i <= shared->slot_last; i++) {
		struct vchiq_slot_info slot_info =
			*SLOT_INFO_FROM_INDEX(state, i);
		if (slot_info.use_count != slot_info.release_count) {
			len = scnprintf(buf, sizeof(buf),
				"      %d: %d/%d", i, slot_info.use_count,
				slot_info.release_count);
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;
		}
	}

	/* Index 0 of debug[] holds the entry count, so start at 1. */
	for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
		len = scnprintf(buf, sizeof(buf), "    DEBUG: %s = %d(%x)",
			debug_names[i], shared->debug[i], shared->debug[i]);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Dump the whole connection state: connection status, tx/rx positions,
 * version, aggregate stats, slot accounting, both shared-state halves,
 * platform info, and then every live service.  Returns 0 or the first
 * error from vchiq_dump().
 */
int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
{
	char buf[80];
	int len;
	int i;
	int err;

	len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
		conn_state_names[state->conn_state]);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	len = scnprintf(buf, sizeof(buf),
		"  tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
		state->local->tx_pos,
		state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
		state->rx_pos,
		state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	len = scnprintf(buf, sizeof(buf),
		"  Version: %d (min %d)",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	if (VCHIQ_ENABLE_STATS) {
		len = scnprintf(buf, sizeof(buf),
			"  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
			"error_count=%d",
			state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
			state->stats.error_count);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
	}

	len = scnprintf(buf, sizeof(buf),
		"  Slots: %d available (%d data), %d recyclable, %d stalls "
		"(%d data)",
		((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
			state->local_tx_pos) / VCHIQ_SLOT_SIZE,
		state->data_quota - state->data_use_count,
		state->local->slot_queue_recycle - state->slot_queue_available,
		state->stats.slot_stalls,
state->stats.data_stalls); 3405 err = vchiq_dump(dump_context, buf, len + 1); 3406 if (err) 3407 return err; 3408 3409 err = vchiq_dump_platform_state(dump_context); 3410 if (err) 3411 return err; 3412 3413 err = vchiq_dump_shared_state(dump_context, 3414 state, 3415 state->local, 3416 "Local"); 3417 if (err) 3418 return err; 3419 err = vchiq_dump_shared_state(dump_context, 3420 state, 3421 state->remote, 3422 "Remote"); 3423 if (err) 3424 return err; 3425 3426 err = vchiq_dump_platform_instances(dump_context); 3427 if (err) 3428 return err; 3429 3430 for (i = 0; i < state->unused_service; i++) { 3431 struct vchiq_service *service = find_service_by_port(state, i); 3432 3433 if (service) { 3434 err = vchiq_dump_service_state(dump_context, service); 3435 unlock_service(service); 3436 if (err) 3437 return err; 3438 } 3439 } 3440 return 0; 3441 } 3442 3443 int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service) 3444 { 3445 char buf[80]; 3446 int len; 3447 int err; 3448 unsigned int ref_count; 3449 3450 /*Don't include the lock just taken*/ 3451 ref_count = kref_read(&service->ref_count) - 1; 3452 len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)", 3453 service->localport, srvstate_names[service->srvstate], 3454 ref_count); 3455 3456 if (service->srvstate != VCHIQ_SRVSTATE_FREE) { 3457 char remoteport[30]; 3458 struct vchiq_service_quota *service_quota = 3459 &service->state->service_quotas[service->localport]; 3460 int fourcc = service->base.fourcc; 3461 int tx_pending, rx_pending; 3462 3463 if (service->remoteport != VCHIQ_PORT_FREE) { 3464 int len2 = scnprintf(remoteport, sizeof(remoteport), 3465 "%u", service->remoteport); 3466 3467 if (service->public_fourcc != VCHIQ_FOURCC_INVALID) 3468 scnprintf(remoteport + len2, 3469 sizeof(remoteport) - len2, 3470 " (client %x)", service->client_id); 3471 } else 3472 strcpy(remoteport, "n/a"); 3473 3474 len += scnprintf(buf + len, sizeof(buf) - len, 3475 " '%c%c%c%c' remote %s (msg use %d/%d, 
slot use %d/%d)", 3476 VCHIQ_FOURCC_AS_4CHARS(fourcc), 3477 remoteport, 3478 service_quota->message_use_count, 3479 service_quota->message_quota, 3480 service_quota->slot_use_count, 3481 service_quota->slot_quota); 3482 3483 err = vchiq_dump(dump_context, buf, len + 1); 3484 if (err) 3485 return err; 3486 3487 tx_pending = service->bulk_tx.local_insert - 3488 service->bulk_tx.remote_insert; 3489 3490 rx_pending = service->bulk_rx.local_insert - 3491 service->bulk_rx.remote_insert; 3492 3493 len = scnprintf(buf, sizeof(buf), 3494 " Bulk: tx_pending=%d (size %d)," 3495 " rx_pending=%d (size %d)", 3496 tx_pending, 3497 tx_pending ? service->bulk_tx.bulks[ 3498 BULK_INDEX(service->bulk_tx.remove)].size : 0, 3499 rx_pending, 3500 rx_pending ? service->bulk_rx.bulks[ 3501 BULK_INDEX(service->bulk_rx.remove)].size : 0); 3502 3503 if (VCHIQ_ENABLE_STATS) { 3504 err = vchiq_dump(dump_context, buf, len + 1); 3505 if (err) 3506 return err; 3507 3508 len = scnprintf(buf, sizeof(buf), 3509 " Ctrl: tx_count=%d, tx_bytes=%llu, " 3510 "rx_count=%d, rx_bytes=%llu", 3511 service->stats.ctrl_tx_count, 3512 service->stats.ctrl_tx_bytes, 3513 service->stats.ctrl_rx_count, 3514 service->stats.ctrl_rx_bytes); 3515 err = vchiq_dump(dump_context, buf, len + 1); 3516 if (err) 3517 return err; 3518 3519 len = scnprintf(buf, sizeof(buf), 3520 " Bulk: tx_count=%d, tx_bytes=%llu, " 3521 "rx_count=%d, rx_bytes=%llu", 3522 service->stats.bulk_tx_count, 3523 service->stats.bulk_tx_bytes, 3524 service->stats.bulk_rx_count, 3525 service->stats.bulk_rx_bytes); 3526 err = vchiq_dump(dump_context, buf, len + 1); 3527 if (err) 3528 return err; 3529 3530 len = scnprintf(buf, sizeof(buf), 3531 " %d quota stalls, %d slot stalls, " 3532 "%d bulk stalls, %d aborted, %d errors", 3533 service->stats.quota_stalls, 3534 service->stats.slot_stalls, 3535 service->stats.bulk_stalls, 3536 service->stats.bulk_aborted_count, 3537 service->stats.error_count); 3538 } 3539 } 3540 3541 err = vchiq_dump(dump_context, buf, 
len + 1); 3542 if (err) 3543 return err; 3544 3545 if (service->srvstate != VCHIQ_SRVSTATE_FREE) 3546 err = vchiq_dump_platform_service_state(dump_context, service); 3547 return err; 3548 } 3549 3550 void 3551 vchiq_loud_error_header(void) 3552 { 3553 vchiq_log_error(vchiq_core_log_level, 3554 "============================================================" 3555 "================"); 3556 vchiq_log_error(vchiq_core_log_level, 3557 "============================================================" 3558 "================"); 3559 vchiq_log_error(vchiq_core_log_level, "====="); 3560 } 3561 3562 void 3563 vchiq_loud_error_footer(void) 3564 { 3565 vchiq_log_error(vchiq_core_log_level, "====="); 3566 vchiq_log_error(vchiq_core_log_level, 3567 "============================================================" 3568 "================"); 3569 vchiq_log_error(vchiq_core_log_level, 3570 "============================================================" 3571 "================"); 3572 } 3573 3574 enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state) 3575 { 3576 enum vchiq_status status = VCHIQ_RETRY; 3577 3578 if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED) 3579 status = queue_message(state, NULL, 3580 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0), 3581 NULL, NULL, 0, 0); 3582 return status; 3583 } 3584 3585 enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state) 3586 { 3587 enum vchiq_status status = VCHIQ_RETRY; 3588 3589 if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED) 3590 status = queue_message(state, NULL, 3591 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0), 3592 NULL, NULL, 0, 0); 3593 return status; 3594 } 3595 3596 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, 3597 size_t num_bytes) 3598 { 3599 const u8 *mem = void_mem; 3600 size_t offset; 3601 char line_buf[100]; 3602 char *s; 3603 3604 while (num_bytes > 0) { 3605 s = line_buf; 3606 3607 for (offset = 0; offset < 16; offset++) { 3608 if (offset < num_bytes) 3609 
s += scnprintf(s, 4, "%02x ", mem[offset]); 3610 else 3611 s += scnprintf(s, 4, " "); 3612 } 3613 3614 for (offset = 0; offset < 16; offset++) { 3615 if (offset < num_bytes) { 3616 u8 ch = mem[offset]; 3617 3618 if ((ch < ' ') || (ch > '~')) 3619 ch = '.'; 3620 *s++ = (char)ch; 3621 } 3622 } 3623 *s++ = '\0'; 3624 3625 if (label && (*label != '\0')) 3626 vchiq_log_trace(VCHIQ_LOG_TRACE, 3627 "%s: %08x: %s", label, addr, line_buf); 3628 else 3629 vchiq_log_trace(VCHIQ_LOG_TRACE, 3630 "%08x: %s", addr, line_buf); 3631 3632 addr += 16; 3633 mem += 16; 3634 if (num_bytes > 16) 3635 num_bytes -= 16; 3636 else 3637 num_bytes = 0; 3638 } 3639 } 3640