1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */ 3 4 #include <linux/types.h> 5 #include <linux/completion.h> 6 #include <linux/mutex.h> 7 #include <linux/bitops.h> 8 #include <linux/kthread.h> 9 #include <linux/wait.h> 10 #include <linux/delay.h> 11 #include <linux/slab.h> 12 #include <linux/kref.h> 13 #include <linux/rcupdate.h> 14 #include <linux/sched/signal.h> 15 16 #include "vchiq_core.h" 17 18 #define VCHIQ_SLOT_HANDLER_STACK 8192 19 20 #define VCHIQ_MSG_PADDING 0 /* - */ 21 #define VCHIQ_MSG_CONNECT 1 /* - */ 22 #define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */ 23 #define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */ 24 #define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */ 25 #define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */ 26 #define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */ 27 #define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */ 28 #define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */ 29 #define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */ 30 #define VCHIQ_MSG_PAUSE 10 /* - */ 31 #define VCHIQ_MSG_RESUME 11 /* - */ 32 #define VCHIQ_MSG_REMOTE_USE 12 /* - */ 33 #define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */ 34 #define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */ 35 36 #define TYPE_SHIFT 24 37 38 #define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1) 39 #define VCHIQ_PORT_FREE 0x1000 40 #define VCHIQ_PORT_IS_VALID(port) ((port) < VCHIQ_PORT_FREE) 41 #define VCHIQ_MAKE_MSG(type, srcport, dstport) \ 42 (((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0)) 43 #define VCHIQ_MSG_TYPE(msgid) ((unsigned int)(msgid) >> TYPE_SHIFT) 44 #define VCHIQ_MSG_SRCPORT(msgid) \ 45 (unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff) 46 #define VCHIQ_MSG_DSTPORT(msgid) \ 47 ((unsigned short)(msgid) & 0xfff) 48 49 #define MAKE_CONNECT (VCHIQ_MSG_CONNECT << TYPE_SHIFT) 50 #define MAKE_OPEN(srcport) \ 51 ((VCHIQ_MSG_OPEN << TYPE_SHIFT) | 
((srcport) << 12)) 52 #define MAKE_OPENACK(srcport, dstport) \ 53 ((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0)) 54 #define MAKE_CLOSE(srcport, dstport) \ 55 ((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0)) 56 #define MAKE_DATA(srcport, dstport) \ 57 ((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0)) 58 #define MAKE_PAUSE (VCHIQ_MSG_PAUSE << TYPE_SHIFT) 59 #define MAKE_RESUME (VCHIQ_MSG_RESUME << TYPE_SHIFT) 60 #define MAKE_REMOTE_USE (VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT) 61 #define MAKE_REMOTE_USE_ACTIVE (VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT) 62 63 /* Ensure the fields are wide enough */ 64 static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX)) 65 == 0); 66 static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0); 67 static_assert((unsigned int)VCHIQ_PORT_MAX < 68 (unsigned int)VCHIQ_PORT_FREE); 69 70 #define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0) 71 #define VCHIQ_MSGID_CLAIMED 0x40000000 72 73 #define VCHIQ_FOURCC_INVALID 0x00000000 74 #define VCHIQ_FOURCC_IS_LEGAL(fourcc) ((fourcc) != VCHIQ_FOURCC_INVALID) 75 76 #define VCHIQ_BULK_ACTUAL_ABORTED -1 77 78 #if VCHIQ_ENABLE_STATS 79 #define VCHIQ_STATS_INC(state, stat) (state->stats. stat++) 80 #define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++) 81 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \ 82 (service->stats. 
stat += addend) 83 #else 84 #define VCHIQ_STATS_INC(state, stat) ((void)0) 85 #define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0) 86 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0) 87 #endif 88 89 #define HANDLE_STATE_SHIFT 12 90 91 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index)) 92 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index)) 93 #define SLOT_INDEX_FROM_DATA(state, data) \ 94 (((unsigned int)((char *)data - (char *)state->slot_data)) / \ 95 VCHIQ_SLOT_SIZE) 96 #define SLOT_INDEX_FROM_INFO(state, info) \ 97 ((unsigned int)(info - state->slot_info)) 98 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \ 99 ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE)) 100 #define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \ 101 (SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK) 102 103 #define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1)) 104 105 #define SRVTRACE_LEVEL(srv) \ 106 (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level) 107 #define SRVTRACE_ENABLED(srv, lev) \ 108 (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev))) 109 110 #define NO_CLOSE_RECVD 0 111 #define CLOSE_RECVD 1 112 113 #define NO_RETRY_POLL 0 114 #define RETRY_POLL 1 115 116 struct vchiq_open_payload { 117 int fourcc; 118 int client_id; 119 short version; 120 short version_min; 121 }; 122 123 struct vchiq_openack_payload { 124 short version; 125 }; 126 127 enum { 128 QMFLAGS_IS_BLOCKING = BIT(0), 129 QMFLAGS_NO_MUTEX_LOCK = BIT(1), 130 QMFLAGS_NO_MUTEX_UNLOCK = BIT(2) 131 }; 132 133 enum { 134 VCHIQ_POLL_TERMINATE, 135 VCHIQ_POLL_REMOVE, 136 VCHIQ_POLL_TXNOTIFY, 137 VCHIQ_POLL_RXNOTIFY, 138 VCHIQ_POLL_COUNT 139 }; 140 141 /* we require this for consistency between endpoints */ 142 static_assert(sizeof(struct vchiq_header) == 8); 143 static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN); 144 145 static inline void check_sizes(void) 146 { 147 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE); 148 
BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS); 149 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE); 150 BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header)); 151 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS); 152 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS); 153 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES); 154 } 155 156 /* Run time control of log level, based on KERN_XXX level. */ 157 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT; 158 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT; 159 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT; 160 161 DEFINE_SPINLOCK(bulk_waiter_spinlock); 162 static DEFINE_SPINLOCK(quota_spinlock); 163 164 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES]; 165 static unsigned int handle_seq; 166 167 static const char *const srvstate_names[] = { 168 "FREE", 169 "HIDDEN", 170 "LISTENING", 171 "OPENING", 172 "OPEN", 173 "OPENSYNC", 174 "CLOSESENT", 175 "CLOSERECVD", 176 "CLOSEWAIT", 177 "CLOSED" 178 }; 179 180 static const char *const reason_names[] = { 181 "SERVICE_OPENED", 182 "SERVICE_CLOSED", 183 "MESSAGE_AVAILABLE", 184 "BULK_TRANSMIT_DONE", 185 "BULK_RECEIVE_DONE", 186 "BULK_TRANSMIT_ABORTED", 187 "BULK_RECEIVE_ABORTED" 188 }; 189 190 static const char *const conn_state_names[] = { 191 "DISCONNECTED", 192 "CONNECTING", 193 "CONNECTED", 194 "PAUSING", 195 "PAUSE_SENT", 196 "PAUSED", 197 "RESUMING", 198 "PAUSE_TIMEOUT", 199 "RESUME_TIMEOUT" 200 }; 201 202 static void 203 release_message_sync(struct vchiq_state *state, struct vchiq_header *header); 204 205 static const char *msg_type_str(unsigned int msg_type) 206 { 207 switch (msg_type) { 208 case VCHIQ_MSG_PADDING: return "PADDING"; 209 case VCHIQ_MSG_CONNECT: return "CONNECT"; 210 case VCHIQ_MSG_OPEN: return "OPEN"; 211 case VCHIQ_MSG_OPENACK: return "OPENACK"; 212 case VCHIQ_MSG_CLOSE: return "CLOSE"; 213 case VCHIQ_MSG_DATA: return "DATA"; 214 case VCHIQ_MSG_BULK_RX: return "BULK_RX"; 215 case VCHIQ_MSG_BULK_TX: return "BULK_TX"; 216 case 
VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE"; 217 case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE"; 218 case VCHIQ_MSG_PAUSE: return "PAUSE"; 219 case VCHIQ_MSG_RESUME: return "RESUME"; 220 case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE"; 221 case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE"; 222 case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE"; 223 } 224 return "???"; 225 } 226 227 static inline void 228 vchiq_set_service_state(struct vchiq_service *service, int newstate) 229 { 230 vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s", 231 service->state->id, service->localport, 232 srvstate_names[service->srvstate], 233 srvstate_names[newstate]); 234 service->srvstate = newstate; 235 } 236 237 struct vchiq_service * 238 find_service_by_handle(unsigned int handle) 239 { 240 struct vchiq_service *service; 241 242 rcu_read_lock(); 243 service = handle_to_service(handle); 244 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && 245 service->handle == handle && 246 kref_get_unless_zero(&service->ref_count)) { 247 service = rcu_pointer_handoff(service); 248 rcu_read_unlock(); 249 return service; 250 } 251 rcu_read_unlock(); 252 vchiq_log_info(vchiq_core_log_level, 253 "Invalid service handle 0x%x", handle); 254 return NULL; 255 } 256 257 struct vchiq_service * 258 find_service_by_port(struct vchiq_state *state, int localport) 259 { 260 261 if ((unsigned int)localport <= VCHIQ_PORT_MAX) { 262 struct vchiq_service *service; 263 264 rcu_read_lock(); 265 service = rcu_dereference(state->services[localport]); 266 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && 267 kref_get_unless_zero(&service->ref_count)) { 268 service = rcu_pointer_handoff(service); 269 rcu_read_unlock(); 270 return service; 271 } 272 rcu_read_unlock(); 273 } 274 vchiq_log_info(vchiq_core_log_level, 275 "Invalid port %d", localport); 276 return NULL; 277 } 278 279 struct vchiq_service * 280 find_service_for_instance(struct vchiq_instance *instance, 281 unsigned int 
handle) 282 { 283 struct vchiq_service *service; 284 285 rcu_read_lock(); 286 service = handle_to_service(handle); 287 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && 288 service->handle == handle && 289 service->instance == instance && 290 kref_get_unless_zero(&service->ref_count)) { 291 service = rcu_pointer_handoff(service); 292 rcu_read_unlock(); 293 return service; 294 } 295 rcu_read_unlock(); 296 vchiq_log_info(vchiq_core_log_level, 297 "Invalid service handle 0x%x", handle); 298 return NULL; 299 } 300 301 struct vchiq_service * 302 find_closed_service_for_instance(struct vchiq_instance *instance, 303 unsigned int handle) 304 { 305 struct vchiq_service *service; 306 307 rcu_read_lock(); 308 service = handle_to_service(handle); 309 if (service && 310 (service->srvstate == VCHIQ_SRVSTATE_FREE || 311 service->srvstate == VCHIQ_SRVSTATE_CLOSED) && 312 service->handle == handle && 313 service->instance == instance && 314 kref_get_unless_zero(&service->ref_count)) { 315 service = rcu_pointer_handoff(service); 316 rcu_read_unlock(); 317 return service; 318 } 319 rcu_read_unlock(); 320 vchiq_log_info(vchiq_core_log_level, 321 "Invalid service handle 0x%x", handle); 322 return service; 323 } 324 325 struct vchiq_service * 326 __next_service_by_instance(struct vchiq_state *state, 327 struct vchiq_instance *instance, 328 int *pidx) 329 { 330 struct vchiq_service *service = NULL; 331 int idx = *pidx; 332 333 while (idx < state->unused_service) { 334 struct vchiq_service *srv; 335 336 srv = rcu_dereference(state->services[idx]); 337 idx++; 338 if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE && 339 srv->instance == instance) { 340 service = srv; 341 break; 342 } 343 } 344 345 *pidx = idx; 346 return service; 347 } 348 349 struct vchiq_service * 350 next_service_by_instance(struct vchiq_state *state, 351 struct vchiq_instance *instance, 352 int *pidx) 353 { 354 struct vchiq_service *service; 355 356 rcu_read_lock(); 357 while (1) { 358 service = 
__next_service_by_instance(state, instance, pidx); 359 if (!service) 360 break; 361 if (kref_get_unless_zero(&service->ref_count)) { 362 service = rcu_pointer_handoff(service); 363 break; 364 } 365 } 366 rcu_read_unlock(); 367 return service; 368 } 369 370 void 371 vchiq_service_get(struct vchiq_service *service) 372 { 373 if (!service) { 374 WARN(1, "%s service is NULL\n", __func__); 375 return; 376 } 377 kref_get(&service->ref_count); 378 } 379 380 static void service_release(struct kref *kref) 381 { 382 struct vchiq_service *service = 383 container_of(kref, struct vchiq_service, ref_count); 384 struct vchiq_state *state = service->state; 385 386 WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE); 387 rcu_assign_pointer(state->services[service->localport], NULL); 388 if (service->userdata_term) 389 service->userdata_term(service->base.userdata); 390 kfree_rcu(service, rcu); 391 } 392 393 void 394 vchiq_service_put(struct vchiq_service *service) 395 { 396 if (!service) { 397 WARN(1, "%s: service is NULL\n", __func__); 398 return; 399 } 400 kref_put(&service->ref_count, service_release); 401 } 402 403 int 404 vchiq_get_client_id(unsigned int handle) 405 { 406 struct vchiq_service *service; 407 int id; 408 409 rcu_read_lock(); 410 service = handle_to_service(handle); 411 id = service ? service->client_id : 0; 412 rcu_read_unlock(); 413 return id; 414 } 415 416 void * 417 vchiq_get_service_userdata(unsigned int handle) 418 { 419 void *userdata; 420 struct vchiq_service *service; 421 422 rcu_read_lock(); 423 service = handle_to_service(handle); 424 userdata = service ? service->base.userdata : NULL; 425 rcu_read_unlock(); 426 return userdata; 427 } 428 EXPORT_SYMBOL(vchiq_get_service_userdata); 429 430 static void 431 mark_service_closing_internal(struct vchiq_service *service, int sh_thread) 432 { 433 struct vchiq_state *state = service->state; 434 struct vchiq_service_quota *quota; 435 436 service->closing = 1; 437 438 /* Synchronise with other threads. 
*/ 439 mutex_lock(&state->recycle_mutex); 440 mutex_unlock(&state->recycle_mutex); 441 if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) { 442 /* 443 * If we're pausing then the slot_mutex is held until resume 444 * by the slot handler. Therefore don't try to acquire this 445 * mutex if we're the slot handler and in the pause sent state. 446 * We don't need to in this case anyway. 447 */ 448 mutex_lock(&state->slot_mutex); 449 mutex_unlock(&state->slot_mutex); 450 } 451 452 /* Unblock any sending thread. */ 453 quota = &state->service_quotas[service->localport]; 454 complete("a->quota_event); 455 } 456 457 static void 458 mark_service_closing(struct vchiq_service *service) 459 { 460 mark_service_closing_internal(service, 0); 461 } 462 463 static inline enum vchiq_status 464 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason, 465 struct vchiq_header *header, void *bulk_userdata) 466 { 467 enum vchiq_status status; 468 469 vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)", 470 service->state->id, service->localport, reason_names[reason], 471 header, bulk_userdata); 472 status = service->base.callback(reason, header, service->handle, 473 bulk_userdata); 474 if (status == VCHIQ_ERROR) { 475 vchiq_log_warning(vchiq_core_log_level, 476 "%d: ignoring ERROR from callback to service %x", 477 service->state->id, service->handle); 478 status = VCHIQ_SUCCESS; 479 } 480 481 if (reason != VCHIQ_MESSAGE_AVAILABLE) 482 vchiq_release_message(service->handle, header); 483 484 return status; 485 } 486 487 inline void 488 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate) 489 { 490 enum vchiq_connstate oldstate = state->conn_state; 491 492 vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id, 493 conn_state_names[oldstate], 494 conn_state_names[newstate]); 495 state->conn_state = newstate; 496 vchiq_platform_conn_state_changed(state, oldstate, newstate); 497 } 498 499 static inline 
void 500 remote_event_create(wait_queue_head_t *wq, struct remote_event *event) 501 { 502 event->armed = 0; 503 /* 504 * Don't clear the 'fired' flag because it may already have been set 505 * by the other side. 506 */ 507 init_waitqueue_head(wq); 508 } 509 510 /* 511 * All the event waiting routines in VCHIQ used a custom semaphore 512 * implementation that filtered most signals. This achieved a behaviour similar 513 * to the "killable" family of functions. While cleaning up this code all the 514 * routines where switched to the "interruptible" family of functions, as the 515 * former was deemed unjustified and the use "killable" set all VCHIQ's 516 * threads in D state. 517 */ 518 static inline int 519 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event) 520 { 521 if (!event->fired) { 522 event->armed = 1; 523 dsb(sy); 524 if (wait_event_interruptible(*wq, event->fired)) { 525 event->armed = 0; 526 return 0; 527 } 528 event->armed = 0; 529 wmb(); 530 } 531 532 event->fired = 0; 533 return 1; 534 } 535 536 static inline void 537 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event) 538 { 539 event->fired = 1; 540 event->armed = 0; 541 wake_up_all(wq); 542 } 543 544 static inline void 545 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event) 546 { 547 if (event->fired && event->armed) 548 remote_event_signal_local(wq, event); 549 } 550 551 void 552 remote_event_pollall(struct vchiq_state *state) 553 { 554 remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger); 555 remote_event_poll(&state->sync_release_event, &state->local->sync_release); 556 remote_event_poll(&state->trigger_event, &state->local->trigger); 557 remote_event_poll(&state->recycle_event, &state->local->recycle); 558 } 559 560 /* 561 * Round up message sizes so that any space at the end of a slot is always big 562 * enough for a header. 
This relies on header size being a power of two, which 563 * has been verified earlier by a static assertion. 564 */ 565 566 static inline size_t 567 calc_stride(size_t size) 568 { 569 /* Allow room for the header */ 570 size += sizeof(struct vchiq_header); 571 572 /* Round up */ 573 return (size + sizeof(struct vchiq_header) - 1) & 574 ~(sizeof(struct vchiq_header) - 1); 575 } 576 577 /* Called by the slot handler thread */ 578 static struct vchiq_service * 579 get_listening_service(struct vchiq_state *state, int fourcc) 580 { 581 int i; 582 583 WARN_ON(fourcc == VCHIQ_FOURCC_INVALID); 584 585 rcu_read_lock(); 586 for (i = 0; i < state->unused_service; i++) { 587 struct vchiq_service *service; 588 589 service = rcu_dereference(state->services[i]); 590 if (service && 591 service->public_fourcc == fourcc && 592 (service->srvstate == VCHIQ_SRVSTATE_LISTENING || 593 (service->srvstate == VCHIQ_SRVSTATE_OPEN && 594 service->remoteport == VCHIQ_PORT_FREE)) && 595 kref_get_unless_zero(&service->ref_count)) { 596 service = rcu_pointer_handoff(service); 597 rcu_read_unlock(); 598 return service; 599 } 600 } 601 rcu_read_unlock(); 602 return NULL; 603 } 604 605 /* Called by the slot handler thread */ 606 static struct vchiq_service * 607 get_connected_service(struct vchiq_state *state, unsigned int port) 608 { 609 int i; 610 611 rcu_read_lock(); 612 for (i = 0; i < state->unused_service; i++) { 613 struct vchiq_service *service = 614 rcu_dereference(state->services[i]); 615 616 if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN && 617 service->remoteport == port && 618 kref_get_unless_zero(&service->ref_count)) { 619 service = rcu_pointer_handoff(service); 620 rcu_read_unlock(); 621 return service; 622 } 623 } 624 rcu_read_unlock(); 625 return NULL; 626 } 627 628 inline void 629 request_poll(struct vchiq_state *state, struct vchiq_service *service, 630 int poll_type) 631 { 632 u32 value; 633 int index; 634 635 if (!service) 636 goto skip_service; 637 638 do { 639 value 
= atomic_read(&service->poll_flags); 640 } while (atomic_cmpxchg(&service->poll_flags, value, 641 value | BIT(poll_type)) != value); 642 643 index = BITSET_WORD(service->localport); 644 do { 645 value = atomic_read(&state->poll_services[index]); 646 } while (atomic_cmpxchg(&state->poll_services[index], 647 value, value | BIT(service->localport & 0x1f)) != value); 648 649 skip_service: 650 state->poll_needed = 1; 651 wmb(); 652 653 /* ... and ensure the slot handler runs. */ 654 remote_event_signal_local(&state->trigger_event, &state->local->trigger); 655 } 656 657 /* 658 * Called from queue_message, by the slot handler and application threads, 659 * with slot_mutex held 660 */ 661 static struct vchiq_header * 662 reserve_space(struct vchiq_state *state, size_t space, int is_blocking) 663 { 664 struct vchiq_shared_state *local = state->local; 665 int tx_pos = state->local_tx_pos; 666 int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK); 667 668 if (space > slot_space) { 669 struct vchiq_header *header; 670 /* Fill the remaining space with padding */ 671 WARN_ON(!state->tx_data); 672 header = (struct vchiq_header *) 673 (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK)); 674 header->msgid = VCHIQ_MSGID_PADDING; 675 header->size = slot_space - sizeof(struct vchiq_header); 676 677 tx_pos += slot_space; 678 } 679 680 /* If necessary, get the next slot. */ 681 if ((tx_pos & VCHIQ_SLOT_MASK) == 0) { 682 int slot_index; 683 684 /* If there is no free slot... */ 685 686 if (!try_wait_for_completion(&state->slot_available_event)) { 687 /* ...wait for one. */ 688 689 VCHIQ_STATS_INC(state, slot_stalls); 690 691 /* But first, flush through the last slot. 
*/ 692 state->local_tx_pos = tx_pos; 693 local->tx_pos = tx_pos; 694 remote_event_signal(&state->remote->trigger); 695 696 if (!is_blocking || 697 (wait_for_completion_interruptible( 698 &state->slot_available_event))) 699 return NULL; /* No space available */ 700 } 701 702 if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) { 703 complete(&state->slot_available_event); 704 pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos); 705 return NULL; 706 } 707 708 slot_index = local->slot_queue[ 709 SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)]; 710 state->tx_data = 711 (char *)SLOT_DATA_FROM_INDEX(state, slot_index); 712 } 713 714 state->local_tx_pos = tx_pos + space; 715 716 return (struct vchiq_header *)(state->tx_data + 717 (tx_pos & VCHIQ_SLOT_MASK)); 718 } 719 720 static void 721 process_free_data_message(struct vchiq_state *state, BITSET_T *service_found, 722 struct vchiq_header *header) 723 { 724 int msgid = header->msgid; 725 int port = VCHIQ_MSG_SRCPORT(msgid); 726 struct vchiq_service_quota *quota = &state->service_quotas[port]; 727 int count; 728 729 spin_lock("a_spinlock); 730 count = quota->message_use_count; 731 if (count > 0) 732 quota->message_use_count = count - 1; 733 spin_unlock("a_spinlock); 734 735 if (count == quota->message_quota) { 736 /* 737 * Signal the service that it 738 * has dropped below its quota 739 */ 740 complete("a->quota_event); 741 } else if (count == 0) { 742 vchiq_log_error(vchiq_core_log_level, 743 "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)", 744 port, 745 quota->message_use_count, 746 header, msgid, header->msgid, 747 header->size); 748 WARN(1, "invalid message use count\n"); 749 } 750 if (!BITSET_IS_SET(service_found, port)) { 751 /* Set the found bit for this service */ 752 BITSET_SET(service_found, port); 753 754 spin_lock("a_spinlock); 755 count = quota->slot_use_count; 756 if (count > 0) 757 quota->slot_use_count = count - 1; 758 spin_unlock("a_spinlock); 759 760 if 
(count > 0) { 761 /* 762 * Signal the service in case 763 * it has dropped below its quota 764 */ 765 complete("a->quota_event); 766 vchiq_log_trace(vchiq_core_log_level, 767 "%d: pfq:%d %x@%pK - slot_use->%d", 768 state->id, port, 769 header->size, header, 770 count - 1); 771 } else { 772 vchiq_log_error(vchiq_core_log_level, 773 "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)", 774 port, count, header, 775 msgid, header->msgid, 776 header->size); 777 WARN(1, "bad slot use count\n"); 778 } 779 } 780 } 781 782 /* Called by the recycle thread. */ 783 static void 784 process_free_queue(struct vchiq_state *state, BITSET_T *service_found, 785 size_t length) 786 { 787 struct vchiq_shared_state *local = state->local; 788 int slot_queue_available; 789 790 /* 791 * Find slots which have been freed by the other side, and return them 792 * to the available queue. 793 */ 794 slot_queue_available = state->slot_queue_available; 795 796 /* 797 * Use a memory barrier to ensure that any state that may have been 798 * modified by another thread is not masked by stale prefetched 799 * values. 800 */ 801 mb(); 802 803 while (slot_queue_available != local->slot_queue_recycle) { 804 unsigned int pos; 805 int slot_index = local->slot_queue[slot_queue_available & 806 VCHIQ_SLOT_QUEUE_MASK]; 807 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index); 808 int data_found = 0; 809 810 slot_queue_available++; 811 /* 812 * Beware of the address dependency - data is calculated 813 * using an index written by the other side. 
814 */ 815 rmb(); 816 817 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x", 818 state->id, slot_index, data, 819 local->slot_queue_recycle, slot_queue_available); 820 821 /* Initialise the bitmask for services which have used this slot */ 822 memset(service_found, 0, length); 823 824 pos = 0; 825 826 while (pos < VCHIQ_SLOT_SIZE) { 827 struct vchiq_header *header = 828 (struct vchiq_header *)(data + pos); 829 int msgid = header->msgid; 830 831 if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) { 832 process_free_data_message(state, service_found, 833 header); 834 data_found = 1; 835 } 836 837 pos += calc_stride(header->size); 838 if (pos > VCHIQ_SLOT_SIZE) { 839 vchiq_log_error(vchiq_core_log_level, 840 "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x", 841 pos, header, msgid, header->msgid, 842 header->size); 843 WARN(1, "invalid slot position\n"); 844 } 845 } 846 847 if (data_found) { 848 int count; 849 850 spin_lock("a_spinlock); 851 count = state->data_use_count; 852 if (count > 0) 853 state->data_use_count = count - 1; 854 spin_unlock("a_spinlock); 855 if (count == state->data_quota) 856 complete(&state->data_quota_event); 857 } 858 859 /* 860 * Don't allow the slot to be reused until we are no 861 * longer interested in it. 
862 */ 863 mb(); 864 865 state->slot_queue_available = slot_queue_available; 866 complete(&state->slot_available_event); 867 } 868 } 869 870 static ssize_t 871 memcpy_copy_callback( 872 void *context, void *dest, 873 size_t offset, size_t maxsize) 874 { 875 memcpy(dest + offset, context + offset, maxsize); 876 return maxsize; 877 } 878 879 static ssize_t 880 copy_message_data( 881 ssize_t (*copy_callback)(void *context, void *dest, 882 size_t offset, size_t maxsize), 883 void *context, 884 void *dest, 885 size_t size) 886 { 887 size_t pos = 0; 888 889 while (pos < size) { 890 ssize_t callback_result; 891 size_t max_bytes = size - pos; 892 893 callback_result = 894 copy_callback(context, dest + pos, 895 pos, max_bytes); 896 897 if (callback_result < 0) 898 return callback_result; 899 900 if (!callback_result) 901 return -EIO; 902 903 if (callback_result > max_bytes) 904 return -EIO; 905 906 pos += callback_result; 907 } 908 909 return size; 910 } 911 912 /* Called by the slot handler and application threads */ 913 static enum vchiq_status 914 queue_message(struct vchiq_state *state, struct vchiq_service *service, 915 int msgid, 916 ssize_t (*copy_callback)(void *context, void *dest, 917 size_t offset, size_t maxsize), 918 void *context, size_t size, int flags) 919 { 920 struct vchiq_shared_state *local; 921 struct vchiq_service_quota *quota = NULL; 922 struct vchiq_header *header; 923 int type = VCHIQ_MSG_TYPE(msgid); 924 925 size_t stride; 926 927 local = state->local; 928 929 stride = calc_stride(size); 930 931 WARN_ON(stride > VCHIQ_SLOT_SIZE); 932 933 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) && 934 mutex_lock_killable(&state->slot_mutex)) 935 return VCHIQ_RETRY; 936 937 if (type == VCHIQ_MSG_DATA) { 938 int tx_end_index; 939 940 if (!service) { 941 WARN(1, "%s: service is NULL\n", __func__); 942 mutex_unlock(&state->slot_mutex); 943 return VCHIQ_ERROR; 944 } 945 946 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK | 947 QMFLAGS_NO_MUTEX_UNLOCK)); 948 949 if (service->closing) 
{ 950 /* The service has been closed */ 951 mutex_unlock(&state->slot_mutex); 952 return VCHIQ_ERROR; 953 } 954 955 quota = &state->service_quotas[service->localport]; 956 957 spin_lock("a_spinlock); 958 959 /* 960 * Ensure this service doesn't use more than its quota of 961 * messages or slots 962 */ 963 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS( 964 state->local_tx_pos + stride - 1); 965 966 /* 967 * Ensure data messages don't use more than their quota of 968 * slots 969 */ 970 while ((tx_end_index != state->previous_data_index) && 971 (state->data_use_count == state->data_quota)) { 972 VCHIQ_STATS_INC(state, data_stalls); 973 spin_unlock("a_spinlock); 974 mutex_unlock(&state->slot_mutex); 975 976 if (wait_for_completion_interruptible( 977 &state->data_quota_event)) 978 return VCHIQ_RETRY; 979 980 mutex_lock(&state->slot_mutex); 981 spin_lock("a_spinlock); 982 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS( 983 state->local_tx_pos + stride - 1); 984 if ((tx_end_index == state->previous_data_index) || 985 (state->data_use_count < state->data_quota)) { 986 /* Pass the signal on to other waiters */ 987 complete(&state->data_quota_event); 988 break; 989 } 990 } 991 992 while ((quota->message_use_count == quota->message_quota) || 993 ((tx_end_index != quota->previous_tx_index) && 994 (quota->slot_use_count == quota->slot_quota))) { 995 spin_unlock("a_spinlock); 996 vchiq_log_trace(vchiq_core_log_level, 997 "%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)", 998 state->id, service->localport, 999 msg_type_str(type), size, 1000 quota->message_use_count, 1001 quota->slot_use_count); 1002 VCHIQ_SERVICE_STATS_INC(service, quota_stalls); 1003 mutex_unlock(&state->slot_mutex); 1004 if (wait_for_completion_interruptible( 1005 "a->quota_event)) 1006 return VCHIQ_RETRY; 1007 if (service->closing) 1008 return VCHIQ_ERROR; 1009 if (mutex_lock_killable(&state->slot_mutex)) 1010 return VCHIQ_RETRY; 1011 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) { 1012 /* The service has been closed */ 
1013 mutex_unlock(&state->slot_mutex); 1014 return VCHIQ_ERROR; 1015 } 1016 spin_lock("a_spinlock); 1017 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS( 1018 state->local_tx_pos + stride - 1); 1019 } 1020 1021 spin_unlock("a_spinlock); 1022 } 1023 1024 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING); 1025 1026 if (!header) { 1027 if (service) 1028 VCHIQ_SERVICE_STATS_INC(service, slot_stalls); 1029 /* 1030 * In the event of a failure, return the mutex to the 1031 * state it was in 1032 */ 1033 if (!(flags & QMFLAGS_NO_MUTEX_LOCK)) 1034 mutex_unlock(&state->slot_mutex); 1035 return VCHIQ_RETRY; 1036 } 1037 1038 if (type == VCHIQ_MSG_DATA) { 1039 ssize_t callback_result; 1040 int tx_end_index; 1041 int slot_use_count; 1042 1043 vchiq_log_info(vchiq_core_log_level, 1044 "%d: qm %s@%pK,%zx (%d->%d)", 1045 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), 1046 header, size, VCHIQ_MSG_SRCPORT(msgid), 1047 VCHIQ_MSG_DSTPORT(msgid)); 1048 1049 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK | 1050 QMFLAGS_NO_MUTEX_UNLOCK)); 1051 1052 callback_result = 1053 copy_message_data(copy_callback, context, 1054 header->data, size); 1055 1056 if (callback_result < 0) { 1057 mutex_unlock(&state->slot_mutex); 1058 VCHIQ_SERVICE_STATS_INC(service, 1059 error_count); 1060 return VCHIQ_ERROR; 1061 } 1062 1063 if (SRVTRACE_ENABLED(service, 1064 VCHIQ_LOG_INFO)) 1065 vchiq_log_dump_mem("Sent", 0, 1066 header->data, 1067 min((size_t)16, 1068 (size_t)callback_result)); 1069 1070 spin_lock("a_spinlock); 1071 quota->message_use_count++; 1072 1073 tx_end_index = 1074 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1); 1075 1076 /* 1077 * If this transmission can't fit in the last slot used by any 1078 * service, the data_use_count must be increased. 
1079 */ 1080 if (tx_end_index != state->previous_data_index) { 1081 state->previous_data_index = tx_end_index; 1082 state->data_use_count++; 1083 } 1084 1085 /* 1086 * If this isn't the same slot last used by this service, 1087 * the service's slot_use_count must be increased. 1088 */ 1089 if (tx_end_index != quota->previous_tx_index) { 1090 quota->previous_tx_index = tx_end_index; 1091 slot_use_count = ++quota->slot_use_count; 1092 } else { 1093 slot_use_count = 0; 1094 } 1095 1096 spin_unlock("a_spinlock); 1097 1098 if (slot_use_count) 1099 vchiq_log_trace(vchiq_core_log_level, 1100 "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)", 1101 state->id, service->localport, 1102 msg_type_str(VCHIQ_MSG_TYPE(msgid)), size, 1103 slot_use_count, header); 1104 1105 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count); 1106 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size); 1107 } else { 1108 vchiq_log_info(vchiq_core_log_level, 1109 "%d: qm %s@%pK,%zx (%d->%d)", state->id, 1110 msg_type_str(VCHIQ_MSG_TYPE(msgid)), 1111 header, size, VCHIQ_MSG_SRCPORT(msgid), 1112 VCHIQ_MSG_DSTPORT(msgid)); 1113 if (size != 0) { 1114 /* 1115 * It is assumed for now that this code path 1116 * only happens from calls inside this file. 1117 * 1118 * External callers are through the vchiq_queue_message 1119 * path which always sets the type to be VCHIQ_MSG_DATA 1120 * 1121 * At first glance this appears to be correct but 1122 * more review is needed. 1123 */ 1124 copy_message_data(copy_callback, context, 1125 header->data, size); 1126 } 1127 VCHIQ_STATS_INC(state, ctrl_tx_count); 1128 } 1129 1130 header->msgid = msgid; 1131 header->size = size; 1132 1133 { 1134 int svc_fourcc; 1135 1136 svc_fourcc = service 1137 ? 
service->base.fourcc 1138 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?'); 1139 1140 vchiq_log_info(SRVTRACE_LEVEL(service), 1141 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu", 1142 msg_type_str(VCHIQ_MSG_TYPE(msgid)), 1143 VCHIQ_MSG_TYPE(msgid), 1144 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), 1145 VCHIQ_MSG_SRCPORT(msgid), 1146 VCHIQ_MSG_DSTPORT(msgid), 1147 size); 1148 } 1149 1150 /* Make sure the new header is visible to the peer. */ 1151 wmb(); 1152 1153 /* Make the new tx_pos visible to the peer. */ 1154 local->tx_pos = state->local_tx_pos; 1155 wmb(); 1156 1157 if (service && (type == VCHIQ_MSG_CLOSE)) 1158 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT); 1159 1160 if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK)) 1161 mutex_unlock(&state->slot_mutex); 1162 1163 remote_event_signal(&state->remote->trigger); 1164 1165 return VCHIQ_SUCCESS; 1166 } 1167 1168 /* Called by the slot handler and application threads */ 1169 static enum vchiq_status 1170 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service, 1171 int msgid, 1172 ssize_t (*copy_callback)(void *context, void *dest, 1173 size_t offset, size_t maxsize), 1174 void *context, int size, int is_blocking) 1175 { 1176 struct vchiq_shared_state *local; 1177 struct vchiq_header *header; 1178 ssize_t callback_result; 1179 1180 local = state->local; 1181 1182 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME && 1183 mutex_lock_killable(&state->sync_mutex)) 1184 return VCHIQ_RETRY; 1185 1186 remote_event_wait(&state->sync_release_event, &local->sync_release); 1187 1188 rmb(); 1189 1190 header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state, 1191 local->slot_sync); 1192 1193 { 1194 int oldmsgid = header->msgid; 1195 1196 if (oldmsgid != VCHIQ_MSGID_PADDING) 1197 vchiq_log_error(vchiq_core_log_level, 1198 "%d: qms - msgid %x, not PADDING", 1199 state->id, oldmsgid); 1200 } 1201 1202 vchiq_log_info(vchiq_sync_log_level, 1203 "%d: qms %s@%pK,%x (%d->%d)", state->id, 1204 msg_type_str(VCHIQ_MSG_TYPE(msgid)), 
1205 header, size, VCHIQ_MSG_SRCPORT(msgid), 1206 VCHIQ_MSG_DSTPORT(msgid)); 1207 1208 callback_result = 1209 copy_message_data(copy_callback, context, 1210 header->data, size); 1211 1212 if (callback_result < 0) { 1213 mutex_unlock(&state->slot_mutex); 1214 VCHIQ_SERVICE_STATS_INC(service, 1215 error_count); 1216 return VCHIQ_ERROR; 1217 } 1218 1219 if (service) { 1220 if (SRVTRACE_ENABLED(service, 1221 VCHIQ_LOG_INFO)) 1222 vchiq_log_dump_mem("Sent", 0, 1223 header->data, 1224 min((size_t)16, 1225 (size_t)callback_result)); 1226 1227 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count); 1228 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size); 1229 } else { 1230 VCHIQ_STATS_INC(state, ctrl_tx_count); 1231 } 1232 1233 header->size = size; 1234 header->msgid = msgid; 1235 1236 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) { 1237 int svc_fourcc; 1238 1239 svc_fourcc = service 1240 ? service->base.fourcc 1241 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?'); 1242 1243 vchiq_log_trace(vchiq_sync_log_level, 1244 "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d", 1245 msg_type_str(VCHIQ_MSG_TYPE(msgid)), 1246 VCHIQ_MSG_TYPE(msgid), 1247 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), 1248 VCHIQ_MSG_SRCPORT(msgid), 1249 VCHIQ_MSG_DSTPORT(msgid), 1250 size); 1251 } 1252 1253 remote_event_signal(&state->remote->sync_trigger); 1254 1255 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE) 1256 mutex_unlock(&state->sync_mutex); 1257 1258 return VCHIQ_SUCCESS; 1259 } 1260 1261 static inline void 1262 claim_slot(struct vchiq_slot_info *slot) 1263 { 1264 slot->use_count++; 1265 } 1266 1267 static void 1268 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info, 1269 struct vchiq_header *header, struct vchiq_service *service) 1270 { 1271 mutex_lock(&state->recycle_mutex); 1272 1273 if (header) { 1274 int msgid = header->msgid; 1275 1276 if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || 1277 (service && service->closing)) { 1278 mutex_unlock(&state->recycle_mutex); 1279 return; 1280 } 1281 1282 
/* Rewrite the message header to prevent a double release */ 1283 header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED; 1284 } 1285 1286 slot_info->release_count++; 1287 1288 if (slot_info->release_count == slot_info->use_count) { 1289 int slot_queue_recycle; 1290 /* Add to the freed queue */ 1291 1292 /* 1293 * A read barrier is necessary here to prevent speculative 1294 * fetches of remote->slot_queue_recycle from overtaking the 1295 * mutex. 1296 */ 1297 rmb(); 1298 1299 slot_queue_recycle = state->remote->slot_queue_recycle; 1300 state->remote->slot_queue[slot_queue_recycle & 1301 VCHIQ_SLOT_QUEUE_MASK] = 1302 SLOT_INDEX_FROM_INFO(state, slot_info); 1303 state->remote->slot_queue_recycle = slot_queue_recycle + 1; 1304 vchiq_log_info(vchiq_core_log_level, 1305 "%d: %s %d - recycle->%x", state->id, __func__, 1306 SLOT_INDEX_FROM_INFO(state, slot_info), 1307 state->remote->slot_queue_recycle); 1308 1309 /* 1310 * A write barrier is necessary, but remote_event_signal 1311 * contains one. 1312 */ 1313 remote_event_signal(&state->remote->recycle); 1314 } 1315 1316 mutex_unlock(&state->recycle_mutex); 1317 } 1318 1319 static inline enum vchiq_reason 1320 get_bulk_reason(struct vchiq_bulk *bulk) 1321 { 1322 if (bulk->dir == VCHIQ_BULK_TRANSMIT) { 1323 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED) 1324 return VCHIQ_BULK_TRANSMIT_ABORTED; 1325 1326 return VCHIQ_BULK_TRANSMIT_DONE; 1327 } 1328 1329 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED) 1330 return VCHIQ_BULK_RECEIVE_ABORTED; 1331 1332 return VCHIQ_BULK_RECEIVE_DONE; 1333 } 1334 1335 /* Called by the slot handler - don't hold the bulk mutex */ 1336 static enum vchiq_status 1337 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue, 1338 int retry_poll) 1339 { 1340 enum vchiq_status status = VCHIQ_SUCCESS; 1341 1342 vchiq_log_trace(vchiq_core_log_level, 1343 "%d: nb:%d %cx - p=%x rn=%x r=%x", 1344 service->state->id, service->localport, 1345 (queue == &service->bulk_tx) ? 
't' : 'r', 1346 queue->process, queue->remote_notify, queue->remove); 1347 1348 queue->remote_notify = queue->process; 1349 1350 while (queue->remove != queue->remote_notify) { 1351 struct vchiq_bulk *bulk = 1352 &queue->bulks[BULK_INDEX(queue->remove)]; 1353 1354 /* 1355 * Only generate callbacks for non-dummy bulk 1356 * requests, and non-terminated services 1357 */ 1358 if (bulk->data && service->instance) { 1359 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) { 1360 if (bulk->dir == VCHIQ_BULK_TRANSMIT) { 1361 VCHIQ_SERVICE_STATS_INC(service, 1362 bulk_tx_count); 1363 VCHIQ_SERVICE_STATS_ADD(service, 1364 bulk_tx_bytes, 1365 bulk->actual); 1366 } else { 1367 VCHIQ_SERVICE_STATS_INC(service, 1368 bulk_rx_count); 1369 VCHIQ_SERVICE_STATS_ADD(service, 1370 bulk_rx_bytes, 1371 bulk->actual); 1372 } 1373 } else { 1374 VCHIQ_SERVICE_STATS_INC(service, 1375 bulk_aborted_count); 1376 } 1377 if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) { 1378 struct bulk_waiter *waiter; 1379 1380 spin_lock(&bulk_waiter_spinlock); 1381 waiter = bulk->userdata; 1382 if (waiter) { 1383 waiter->actual = bulk->actual; 1384 complete(&waiter->event); 1385 } 1386 spin_unlock(&bulk_waiter_spinlock); 1387 } else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) { 1388 enum vchiq_reason reason = 1389 get_bulk_reason(bulk); 1390 status = make_service_callback(service, 1391 reason, NULL, bulk->userdata); 1392 if (status == VCHIQ_RETRY) 1393 break; 1394 } 1395 } 1396 1397 queue->remove++; 1398 complete(&service->bulk_remove_event); 1399 } 1400 if (!retry_poll) 1401 status = VCHIQ_SUCCESS; 1402 1403 if (status == VCHIQ_RETRY) 1404 request_poll(service->state, service, 1405 (queue == &service->bulk_tx) ? 
1406 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY); 1407 1408 return status; 1409 } 1410 1411 static void 1412 poll_services_of_group(struct vchiq_state *state, int group) 1413 { 1414 u32 flags = atomic_xchg(&state->poll_services[group], 0); 1415 int i; 1416 1417 for (i = 0; flags; i++) { 1418 struct vchiq_service *service; 1419 u32 service_flags; 1420 1421 if ((flags & BIT(i)) == 0) 1422 continue; 1423 1424 service = find_service_by_port(state, (group << 5) + i); 1425 flags &= ~BIT(i); 1426 1427 if (!service) 1428 continue; 1429 1430 service_flags = atomic_xchg(&service->poll_flags, 0); 1431 if (service_flags & BIT(VCHIQ_POLL_REMOVE)) { 1432 vchiq_log_info(vchiq_core_log_level, "%d: ps - remove %d<->%d", 1433 state->id, service->localport, 1434 service->remoteport); 1435 1436 /* 1437 * Make it look like a client, because 1438 * it must be removed and not left in 1439 * the LISTENING state. 1440 */ 1441 service->public_fourcc = VCHIQ_FOURCC_INVALID; 1442 1443 if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) != 1444 VCHIQ_SUCCESS) 1445 request_poll(state, service, VCHIQ_POLL_REMOVE); 1446 } else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) { 1447 vchiq_log_info(vchiq_core_log_level, 1448 "%d: ps - terminate %d<->%d", 1449 state->id, service->localport, 1450 service->remoteport); 1451 if (vchiq_close_service_internal( 1452 service, NO_CLOSE_RECVD) != 1453 VCHIQ_SUCCESS) 1454 request_poll(state, service, 1455 VCHIQ_POLL_TERMINATE); 1456 } 1457 if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY)) 1458 notify_bulks(service, &service->bulk_tx, RETRY_POLL); 1459 if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY)) 1460 notify_bulks(service, &service->bulk_rx, RETRY_POLL); 1461 vchiq_service_put(service); 1462 } 1463 } 1464 1465 /* Called by the slot handler thread */ 1466 static void 1467 poll_services(struct vchiq_state *state) 1468 { 1469 int group; 1470 1471 for (group = 0; group < BITSET_SIZE(state->unused_service); group++) 1472 poll_services_of_group(state, group); 1473 } 

/*
 * Called with the bulk_mutex held.
 *
 * Abort every bulk transfer still outstanding on @queue, fabricating
 * dummy entries where one side never submitted its half, so the local
 * and remote insert positions converge on queue->process.
 */
static void
abort_outstanding_bulks(struct vchiq_service *service,
			struct vchiq_bulk_queue *queue)
{
	int is_tx = (queue == &service->bulk_tx);

	vchiq_log_trace(vchiq_core_log_level,
			"%d: aob:%d %cx - li=%x ri=%x p=%x",
			service->state->id, service->localport, is_tx ? 't' : 'r',
			queue->local_insert, queue->remote_insert, queue->process);

	WARN_ON((int)(queue->local_insert - queue->process) < 0);
	WARN_ON((int)(queue->remote_insert - queue->process) < 0);

	while ((queue->process != queue->local_insert) ||
	       (queue->process != queue->remote_insert)) {
		struct vchiq_bulk *bulk =
			&queue->bulks[BULK_INDEX(queue->process)];

		if (queue->process == queue->remote_insert) {
			/* fabricate a matching dummy bulk */
			bulk->remote_data = NULL;
			bulk->remote_size = 0;
			queue->remote_insert++;
		}

		if (queue->process != queue->local_insert) {
			/* A locally-submitted bulk - complete it as aborted */
			vchiq_complete_bulk(bulk);

			vchiq_log_info(SRVTRACE_LEVEL(service),
				       "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
				       is_tx ? "Send Bulk to" : "Recv Bulk from",
				       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
				       service->remoteport,
				       bulk->size,
				       bulk->remote_size);
		} else {
			/* fabricate a matching dummy bulk */
			bulk->data = 0;
			bulk->size = 0;
			bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
			bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
				VCHIQ_BULK_RECEIVE;
			queue->local_insert++;
		}

		queue->process++;
	}
}

/*
 * Handle an incoming OPEN message: find a listening service for the
 * requested fourcc, check version compatibility, send an OPENACK (sync
 * or async depending on the service), and move the service to OPEN.
 * Returns 1 when the message was dealt with (including rejection via
 * CLOSE), 0 when the send must be retried later.
 */
static int
parse_open(struct vchiq_state *state, struct vchiq_header *header)
{
	const struct vchiq_open_payload *payload;
	struct vchiq_service *service = NULL;
	int msgid, size;
	unsigned int localport, remoteport, fourcc;
	short version, version_min;

	msgid = header->msgid;
	size = header->size;
	localport = VCHIQ_MSG_DSTPORT(msgid);
	remoteport = VCHIQ_MSG_SRCPORT(msgid);
	/* Reject a truncated payload */
	if (size < sizeof(struct vchiq_open_payload))
		goto fail_open;

	payload = (struct vchiq_open_payload *)header->data;
	fourcc = payload->fourcc;
	vchiq_log_info(vchiq_core_log_level,
		       "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
		       state->id, header, localport,
		       VCHIQ_FOURCC_AS_4CHARS(fourcc));

	service = get_listening_service(state, fourcc);
	if (!service)
		goto fail_open;

	/* A matching service exists */
	version = payload->version;
	version_min = payload->version_min;

	if ((service->version < version_min) ||
	    (version < service->version_min)) {
		/* Version mismatch */
		vchiq_loud_error_header();
		vchiq_loud_error("%d: service %d (%c%c%c%c) "
				 "version mismatch - local (%d, min %d)"
				 " vs. remote (%d, min %d)",
				 state->id, service->localport,
				 VCHIQ_FOURCC_AS_4CHARS(fourcc),
				 service->version, service->version_min,
				 version, version_min);
		vchiq_loud_error_footer();
		vchiq_service_put(service);
		service = NULL;
		goto fail_open;
	}
	service->peer_version = version;

	if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
		struct vchiq_openack_payload ack_payload = {
			service->version
		};
		int openack_id = MAKE_OPENACK(service->localport, remoteport);

		/* Old peers cannot handle synchronous services */
		if (state->version_common <
		    VCHIQ_VERSION_SYNCHRONOUS_MODE)
			service->sync = 0;

		/* Acknowledge the OPEN */
		if (service->sync) {
			if (queue_message_sync(state, NULL, openack_id,
					       memcpy_copy_callback,
					       &ack_payload,
					       sizeof(ack_payload),
					       0) == VCHIQ_RETRY)
				goto bail_not_ready;
		} else {
			if (queue_message(state, NULL, openack_id,
					  memcpy_copy_callback,
					  &ack_payload,
					  sizeof(ack_payload),
					  0) == VCHIQ_RETRY)
				goto bail_not_ready;
		}

		/* The service is now open */
		vchiq_set_service_state(service,
					service->sync ? VCHIQ_SRVSTATE_OPENSYNC
					: VCHIQ_SRVSTATE_OPEN);
	}

	/* Success - the message has been dealt with */
	vchiq_service_put(service);
	return 1;

fail_open:
	/* No available service, or an invalid request - send a CLOSE */
	if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
			  NULL, NULL, 0, 0) == VCHIQ_RETRY)
		goto bail_not_ready;

	return 1;

bail_not_ready:
	if (service)
		vchiq_service_put(service);

	return 0;
}

/**
 * parse_message() - parses a single message from the rx slot
 * @state: vchiq state struct
 * @header: message header
 *
 * Context: Process context
 *
 * Return:
 * * >= 0 - size of the parsed message payload (without header)
 * * -EINVAL - fatal error occurred, bail out is required
 */
static int
parse_message(struct vchiq_state *state, struct vchiq_header *header)
{
	struct vchiq_service *service = NULL;
	unsigned int localport, remoteport;
	int msgid, size, type, ret = -EINVAL;

	DEBUG_INITIALISE(state->local)

	DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
	msgid = header->msgid;
	DEBUG_VALUE(PARSE_MSGID, msgid);
	size = header->size;
	type = VCHIQ_MSG_TYPE(msgid);
	localport = VCHIQ_MSG_DSTPORT(msgid);
	remoteport = VCHIQ_MSG_SRCPORT(msgid);

	if (type != VCHIQ_MSG_DATA)
		VCHIQ_STATS_INC(state, ctrl_rx_count);

	/* These message types are addressed to a specific service */
	switch (type) {
	case VCHIQ_MSG_OPENACK:
	case VCHIQ_MSG_CLOSE:
	case VCHIQ_MSG_DATA:
	case VCHIQ_MSG_BULK_RX:
	case VCHIQ_MSG_BULK_TX:
	case VCHIQ_MSG_BULK_RX_DONE:
	case VCHIQ_MSG_BULK_TX_DONE:
		service = find_service_by_port(state, localport);
		if ((!service ||
		     ((service->remoteport != remoteport) &&
		      (service->remoteport != VCHIQ_PORT_FREE))) &&
		    (localport == 0) &&
		    (type == VCHIQ_MSG_CLOSE)) {
			/*
			 * This could be a CLOSE from a client which
			 * hadn't yet received the OPENACK - look for
			 * the connected service
			 */
			if (service)
				vchiq_service_put(service);
			service = get_connected_service(state,
							remoteport);
			if (service)
				vchiq_log_warning(vchiq_core_log_level,
						  "%d: prs %s@%pK (%d->%d) - found connected service %d",
						  state->id, msg_type_str(type),
						  header, remoteport, localport,
						  service->localport);
		}

		if (!service) {
			vchiq_log_error(vchiq_core_log_level,
					"%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
					state->id, msg_type_str(type),
					header, remoteport, localport,
					localport);
			goto skip_message;
		}
		break;
	default:
		break;
	}

	if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
		int svc_fourcc;

		svc_fourcc = service
			? service->base.fourcc
			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
		vchiq_log_info(SRVTRACE_LEVEL(service),
			       "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
			       msg_type_str(type), type,
			       VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
			       remoteport, localport, size);
		if (size > 0)
			vchiq_log_dump_mem("Rcvd", 0, header->data,
					   min(16, size));
	}

	/* The message must fit entirely inside its slot */
	if (((unsigned long)header & VCHIQ_SLOT_MASK) +
	    calc_stride(size) > VCHIQ_SLOT_SIZE) {
		vchiq_log_error(vchiq_core_log_level,
				"header %pK (msgid %x) - size %x too big for slot",
				header, (unsigned int)msgid,
				(unsigned int)size);
		WARN(1, "oversized for slot\n");
	}

	switch (type) {
	case VCHIQ_MSG_OPEN:
		WARN_ON(VCHIQ_MSG_DSTPORT(msgid));
		if (!parse_open(state, header))
			goto bail_not_ready;
		break;
	case VCHIQ_MSG_OPENACK:
		if (size >= sizeof(struct vchiq_openack_payload)) {
			const struct vchiq_openack_payload *payload =
				(struct vchiq_openack_payload *)
				header->data;
			service->peer_version = payload->version;
		}
		vchiq_log_info(vchiq_core_log_level,
			       "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
			       state->id, header, size, remoteport, localport,
			       service->peer_version);
		if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
			service->remoteport = remoteport;
			vchiq_set_service_state(service,
						VCHIQ_SRVSTATE_OPEN);
			complete(&service->remove_event);
		} else {
			vchiq_log_error(vchiq_core_log_level,
					"OPENACK received in state %s",
					srvstate_names[service->srvstate]);
		}
		break;
	case VCHIQ_MSG_CLOSE:
		WARN_ON(size); /* There should be no data */

		vchiq_log_info(vchiq_core_log_level,
			       "%d: prs CLOSE@%pK (%d->%d)",
			       state->id, header, remoteport, localport);

		mark_service_closing_internal(service, 1);

		if (vchiq_close_service_internal(service,
						 CLOSE_RECVD) == VCHIQ_RETRY)
			goto bail_not_ready;

		vchiq_log_info(vchiq_core_log_level,
			       "Close Service %c%c%c%c s:%u d:%d",
			       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			       service->localport,
			       service->remoteport);
		break;
	case VCHIQ_MSG_DATA:
		vchiq_log_info(vchiq_core_log_level,
			       "%d: prs DATA@%pK,%x (%d->%d)",
			       state->id, header, size, remoteport, localport);

		if ((service->remoteport == remoteport) &&
		    (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
			/*
			 * Claim the slot so it isn't recycled before the
			 * service has consumed the message.
			 */
			header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
			claim_slot(state->rx_info);
			DEBUG_TRACE(PARSE_LINE);
			if (make_service_callback(service,
						  VCHIQ_MESSAGE_AVAILABLE, header,
						  NULL) == VCHIQ_RETRY) {
				DEBUG_TRACE(PARSE_LINE);
				goto bail_not_ready;
			}
			VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
			VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
						size);
		} else {
			VCHIQ_STATS_INC(state, error_count);
		}
		break;
	case VCHIQ_MSG_CONNECT:
		vchiq_log_info(vchiq_core_log_level,
			       "%d: prs CONNECT@%pK", state->id, header);
		state->version_common = ((struct vchiq_slot_zero *)
					 state->slot_data)->version;
		complete(&state->connect);
		break;
	case VCHIQ_MSG_BULK_RX:
	case VCHIQ_MSG_BULK_TX:
		/*
		 * We should never receive a bulk request from the
		 * other side since we're not setup to perform as the
		 * master.
		 */
		WARN_ON(1);
		break;
	case VCHIQ_MSG_BULK_RX_DONE:
	case VCHIQ_MSG_BULK_TX_DONE:
		if ((service->remoteport == remoteport) &&
		    (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
			struct vchiq_bulk_queue *queue;
			struct vchiq_bulk *bulk;

			queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
				&service->bulk_rx : &service->bulk_tx;

			DEBUG_TRACE(PARSE_LINE);
			if (mutex_lock_killable(&service->bulk_mutex)) {
				DEBUG_TRACE(PARSE_LINE);
				goto bail_not_ready;
			}
			/* A DONE for a bulk that was never submitted locally */
			if ((int)(queue->remote_insert -
				  queue->local_insert) >= 0) {
				vchiq_log_error(vchiq_core_log_level,
						"%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
						state->id, msg_type_str(type),
						header, remoteport, localport,
						queue->remote_insert,
						queue->local_insert);
				mutex_unlock(&service->bulk_mutex);
				break;
			}
			if (queue->process != queue->remote_insert) {
				pr_err("%s: p %x != ri %x\n",
				       __func__,
				       queue->process,
				       queue->remote_insert);
				mutex_unlock(&service->bulk_mutex);
				goto bail_not_ready;
			}

			bulk = &queue->bulks[
				BULK_INDEX(queue->remote_insert)];
			bulk->actual = *(int *)header->data;
			queue->remote_insert++;

			vchiq_log_info(vchiq_core_log_level,
				       "%d: prs %s@%pK (%d->%d) %x@%pad",
				       state->id, msg_type_str(type),
				       header, remoteport, localport,
				       bulk->actual, &bulk->data);

			vchiq_log_trace(vchiq_core_log_level,
					"%d: prs:%d %cx li=%x ri=%x p=%x",
					state->id, localport,
					(type == VCHIQ_MSG_BULK_RX_DONE) ?
					'r' : 't',
					queue->local_insert,
					queue->remote_insert, queue->process);

			DEBUG_TRACE(PARSE_LINE);
			WARN_ON(queue->process == queue->local_insert);
			vchiq_complete_bulk(bulk);
			queue->process++;
			mutex_unlock(&service->bulk_mutex);
			DEBUG_TRACE(PARSE_LINE);
			notify_bulks(service, queue, RETRY_POLL);
			DEBUG_TRACE(PARSE_LINE);
		}
		break;
	case VCHIQ_MSG_PADDING:
		vchiq_log_trace(vchiq_core_log_level,
				"%d: prs PADDING@%pK,%x",
				state->id, header, size);
		break;
	case VCHIQ_MSG_PAUSE:
		/* If initiated, signal the application thread */
		vchiq_log_trace(vchiq_core_log_level,
				"%d: prs PAUSE@%pK,%x",
				state->id, header, size);
		if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
			vchiq_log_error(vchiq_core_log_level,
					"%d: PAUSE received in state PAUSED",
					state->id);
			break;
		}
		if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
			/* Send a PAUSE in response */
			if (queue_message(state, NULL, MAKE_PAUSE,
					  NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
			    == VCHIQ_RETRY)
				goto bail_not_ready;
		}
		/* At this point slot_mutex is held */
		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
		break;
	case VCHIQ_MSG_RESUME:
		vchiq_log_trace(vchiq_core_log_level,
				"%d: prs RESUME@%pK,%x",
				state->id, header, size);
		/* Release the slot mutex */
		mutex_unlock(&state->slot_mutex);
		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
		break;

	case VCHIQ_MSG_REMOTE_USE:
		vchiq_on_remote_use(state);
		break;
	case VCHIQ_MSG_REMOTE_RELEASE:
		vchiq_on_remote_release(state);
		break;
	case VCHIQ_MSG_REMOTE_USE_ACTIVE:
		break;

	default:
		vchiq_log_error(vchiq_core_log_level,
				"%d: prs invalid msgid %x@%pK,%x",
				state->id, msgid, header, size);
		WARN(1, "invalid message\n");
		break;
	}

skip_message:
	ret = size;

bail_not_ready:
	if (service)
		vchiq_service_put(service);

	return ret;
}

/*
 * Called by the slot handler thread.
 *
 * Consume every message the peer has published (remote->tx_pos) that we
 * have not yet parsed (state->rx_pos), releasing each receive slot once
 * its final message has been handled.
 */
static void
parse_rx_slots(struct vchiq_state *state)
{
	struct vchiq_shared_state *remote = state->remote;
	int tx_pos;

	DEBUG_INITIALISE(state->local)

	tx_pos = remote->tx_pos;

	while (state->rx_pos != tx_pos) {
		struct vchiq_header *header;
		int size;

		DEBUG_TRACE(PARSE_LINE);
		if (!state->rx_data) {
			/* Starting a fresh slot - look it up and claim it */
			int rx_index;

			WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK);
			rx_index = remote->slot_queue[
				SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
			state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
								      rx_index);
			state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);

			/*
			 * Initialise use_count to one, and increment
			 * release_count at the end of the slot to avoid
			 * releasing the slot prematurely.
			 */
			state->rx_info->use_count = 1;
			state->rx_info->release_count = 0;
		}

		header = (struct vchiq_header *)(state->rx_data +
			(state->rx_pos & VCHIQ_SLOT_MASK));
		size = parse_message(state, header);
		if (size < 0)
			return;

		state->rx_pos += calc_stride(size);

		DEBUG_TRACE(PARSE_LINE);
		/*
		 * Perform some housekeeping when the end of the slot is
		 * reached.
		 */
		if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
			/* Remove the extra reference count. */
			release_slot(state, state->rx_info, NULL, NULL);
			state->rx_data = NULL;
		}
	}
}

/**
 * handle_poll() - handle service polling and other rare conditions
 * @state:  vchiq state struct
 *
 * Context: Process context
 *
 * Return:
 * * 0        - poll handled successful
 * * -EAGAIN  - retry later
 */
static int
handle_poll(struct vchiq_state *state)
{
	switch (state->conn_state) {
	case VCHIQ_CONNSTATE_CONNECTED:
		/* Poll the services as requested */
		poll_services(state);
		break;

	case VCHIQ_CONNSTATE_PAUSING:
		if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
				  QMFLAGS_NO_MUTEX_UNLOCK) != VCHIQ_RETRY) {
			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT);
		} else {
			/* Retry later */
			return -EAGAIN;
		}
		break;

	case VCHIQ_CONNSTATE_RESUMING:
		if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0,
				  QMFLAGS_NO_MUTEX_LOCK) != VCHIQ_RETRY) {
			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
		} else {
			/*
			 * This should really be impossible,
			 * since the PAUSE should have flushed
			 * through outstanding messages.
			 */
			vchiq_log_error(vchiq_core_log_level,
					"Failed to send RESUME message");
		}
		break;
	default:
		break;
	}

	return 0;
}

/* Called by the slot handler thread */
static int
slot_handler_func(void *v)
{
	struct vchiq_state *state = v;
	struct vchiq_shared_state *local = state->local;

	DEBUG_INITIALISE(local)

	/* Main receive loop: woken by the peer's trigger event */
	while (1) {
		DEBUG_COUNT(SLOT_HANDLER_COUNT);
		DEBUG_TRACE(SLOT_HANDLER_LINE);
		remote_event_wait(&state->trigger_event, &local->trigger);

		/* Ensure shared-memory reads happen after the event */
		rmb();

		DEBUG_TRACE(SLOT_HANDLER_LINE);
		if (state->poll_needed) {
			state->poll_needed = 0;

			/*
			 * Handle service polling and other rare conditions here
			 * out of the mainline code
			 */
			if (handle_poll(state) == -EAGAIN)
				state->poll_needed = 1;
		}

		DEBUG_TRACE(SLOT_HANDLER_LINE);
		parse_rx_slots(state);
	}
	return 0;
}

/* Called by the recycle thread */
static int
recycle_func(void *v)
{
	struct vchiq_state *state = v;
	struct vchiq_shared_state *local = state->local;
	BITSET_T *found;
	size_t length;

	length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);

	/* Scratch bitset reused across every pass of the free queue */
	found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
			      GFP_KERNEL);
	if (!found)
		return -ENOMEM;

	while (1) {
		remote_event_wait(&state->recycle_event, &local->recycle);

		process_free_queue(state, found, length);
	}
	return 0;
}

/* Called by the sync thread */
static int
sync_func(void *v)
{
	struct vchiq_state *state = v;
	struct vchiq_shared_state *local = state->local;
	/* The peer always writes sync messages into this single slot */
	struct vchiq_header *header =
		(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
							    state->remote->slot_sync);

	while (1) {
		struct vchiq_service *service;
		int msgid, size;
		int type;
		unsigned int localport, remoteport;

		remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);

		/* Ensure the slot contents are read after the event */
		rmb();

		msgid = header->msgid;
		size = header->size;
		type = VCHIQ_MSG_TYPE(msgid);
		localport = VCHIQ_MSG_DSTPORT(msgid);
		remoteport = VCHIQ_MSG_SRCPORT(msgid);

		service = find_service_by_port(state, localport);

		if (!service) {
			vchiq_log_error(vchiq_sync_log_level,
					"%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
					state->id, msg_type_str(type),
					header, remoteport, localport, localport);
			release_message_sync(state, header);
			continue;
		}

		if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
			int svc_fourcc;

			svc_fourcc = service
				? service->base.fourcc
				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
			vchiq_log_trace(vchiq_sync_log_level,
					"Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
					msg_type_str(type),
					VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
					remoteport, localport, size);
			if (size > 0)
				vchiq_log_dump_mem("Rcvd", 0, header->data,
						   min(16, size));
		}

		switch (type) {
		case VCHIQ_MSG_OPENACK:
			if (size >= sizeof(struct vchiq_openack_payload)) {
				const struct vchiq_openack_payload *payload =
					(struct vchiq_openack_payload *)
					header->data;
				service->peer_version = payload->version;
			}
			vchiq_log_info(vchiq_sync_log_level,
				       "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
				       state->id, header, size, remoteport, localport,
				       service->peer_version);
			if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
				service->remoteport = remoteport;
				vchiq_set_service_state(service,
							VCHIQ_SRVSTATE_OPENSYNC);
				service->sync = 1;
				complete(&service->remove_event);
			}
			release_message_sync(state, header);
			break;

		case VCHIQ_MSG_DATA:
			vchiq_log_trace(vchiq_sync_log_level,
					"%d: sf DATA@%pK,%x (%d->%d)",
					state->id, header, size, remoteport, localport);

			/*
			 * NOTE: the sync slot is not released here - the
			 * service's callback is responsible for that.
			 */
			if ((service->remoteport == remoteport) &&
			    (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
				if (make_service_callback(service,
							  VCHIQ_MESSAGE_AVAILABLE, header,
							  NULL) == VCHIQ_RETRY)
					vchiq_log_error(vchiq_sync_log_level,
							"synchronous callback to service %d returns VCHIQ_RETRY",
							localport);
			}
			break;

		default:
			vchiq_log_error(vchiq_sync_log_level,
					"%d: sf unexpected msgid %x@%pK,%x",
					state->id, msgid, header, size);
			release_message_sync(state, header);
			break;
		}

		vchiq_service_put(service);
	}

	return 0;
}

/* Reset all positions of a bulk queue to empty. */
static void
init_bulk_queue(struct vchiq_bulk_queue *queue)
{
	queue->local_insert = 0;
	queue->remote_insert = 0;
	queue->process = 0;
	queue->remote_notify = 0;
	queue->remove = 0;
}

/* Human-readable name for a connection state (for logs/debugfs). */
inline const char *
get_conn_state_name(enum vchiq_connstate conn_state)
{
	return conn_state_names[conn_state];
}

/*
 * Lay out the shared-memory region: align to a slot boundary, place
 * slot_zero at the start, and split the remaining data slots evenly
 * between master and slave.  Returns NULL if there is not enough memory
 * for a minimal configuration.
 */
struct vchiq_slot_zero *
vchiq_init_slots(void *mem_base, int mem_size)
{
	int mem_align =
		(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
	struct vchiq_slot_zero *slot_zero =
		(struct vchiq_slot_zero *)(mem_base + mem_align);
	int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE;
	int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;

	check_sizes();

	/* Ensure there is enough memory to run an absolutely minimum system */
	num_slots -= first_data_slot;

	if (num_slots < 4) {
		vchiq_log_error(vchiq_core_log_level,
				"%s - insufficient memory %x bytes",
				__func__, mem_size);
		return NULL;
	}

	memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));

	slot_zero->magic = VCHIQ_MAGIC;
	slot_zero->version = VCHIQ_VERSION;
	slot_zero->version_min = VCHIQ_VERSION_MIN;
	slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
	slot_zero->slot_size = VCHIQ_SLOT_SIZE;
	slot_zero->max_slots = VCHIQ_MAX_SLOTS;
	slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;

	slot_zero->master.slot_sync = first_data_slot;
	slot_zero->master.slot_first = first_data_slot + 1;
	slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1;
	slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2);
	slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1;
	slot_zero->slave.slot_last = first_data_slot + num_slots - 1;

	return slot_zero;
}

int
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
{
	struct vchiq_shared_state *local;
	struct vchiq_shared_state *remote;
	char threadname[16];
	int i, ret;

	/* Only a single state instance is supported */
	if (vchiq_states[0]) {
		pr_err("%s: VCHIQ state already initialized\n", __func__);
		return -EINVAL;
	}

	/* This (ARM) side is always the slave */
	local = &slot_zero->slave;
	remote = &slot_zero->master;

	if (local->initialised) {
		vchiq_loud_error_header();
		if (remote->initialised)
			vchiq_loud_error("local state has already been initialised");
		else
			vchiq_loud_error("master/slave mismatch two slaves");
		vchiq_loud_error_footer();
		return -EINVAL;
	}

	memset(state, 0, sizeof(struct vchiq_state));

	/*
	 * initialize shared state pointers
	 */

	state->local = local;
	state->remote = remote;
	state->slot_data = (struct vchiq_slot *)slot_zero;

	/*
	 * initialize events and mutexes
	 */

	init_completion(&state->connect);
	mutex_init(&state->mutex);
	mutex_init(&state->slot_mutex);
	mutex_init(&state->recycle_mutex);
	mutex_init(&state->sync_mutex);
	mutex_init(&state->bulk_transfer_mutex);

	init_completion(&state->slot_available_event);
	init_completion(&state->slot_remove_event);
	init_completion(&state->data_quota_event);

	state->slot_queue_available = 0;
2318 2319 for (i = 0; i < VCHIQ_MAX_SERVICES; i++) { 2320 struct vchiq_service_quota *quota = &state->service_quotas[i]; 2321 init_completion("a->quota_event); 2322 } 2323 2324 for (i = local->slot_first; i <= local->slot_last; i++) { 2325 local->slot_queue[state->slot_queue_available] = i; 2326 state->slot_queue_available++; 2327 complete(&state->slot_available_event); 2328 } 2329 2330 state->default_slot_quota = state->slot_queue_available / 2; 2331 state->default_message_quota = 2332 min((unsigned short)(state->default_slot_quota * 256), 2333 (unsigned short)~0); 2334 2335 state->previous_data_index = -1; 2336 state->data_use_count = 0; 2337 state->data_quota = state->slot_queue_available - 1; 2338 2339 remote_event_create(&state->trigger_event, &local->trigger); 2340 local->tx_pos = 0; 2341 remote_event_create(&state->recycle_event, &local->recycle); 2342 local->slot_queue_recycle = state->slot_queue_available; 2343 remote_event_create(&state->sync_trigger_event, &local->sync_trigger); 2344 remote_event_create(&state->sync_release_event, &local->sync_release); 2345 2346 /* At start-of-day, the slot is empty and available */ 2347 ((struct vchiq_header *) 2348 SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid = 2349 VCHIQ_MSGID_PADDING; 2350 remote_event_signal_local(&state->sync_release_event, &local->sync_release); 2351 2352 local->debug[DEBUG_ENTRIES] = DEBUG_MAX; 2353 2354 ret = vchiq_platform_init_state(state); 2355 if (ret) 2356 return ret; 2357 2358 /* 2359 * bring up slot handler thread 2360 */ 2361 snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id); 2362 state->slot_handler_thread = kthread_create(&slot_handler_func, 2363 (void *)state, 2364 threadname); 2365 2366 if (IS_ERR(state->slot_handler_thread)) { 2367 vchiq_loud_error_header(); 2368 vchiq_loud_error("couldn't create thread %s", threadname); 2369 vchiq_loud_error_footer(); 2370 return PTR_ERR(state->slot_handler_thread); 2371 } 2372 set_user_nice(state->slot_handler_thread, 
-19); 2373 2374 snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id); 2375 state->recycle_thread = kthread_create(&recycle_func, 2376 (void *)state, 2377 threadname); 2378 if (IS_ERR(state->recycle_thread)) { 2379 vchiq_loud_error_header(); 2380 vchiq_loud_error("couldn't create thread %s", threadname); 2381 vchiq_loud_error_footer(); 2382 ret = PTR_ERR(state->recycle_thread); 2383 goto fail_free_handler_thread; 2384 } 2385 set_user_nice(state->recycle_thread, -19); 2386 2387 snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id); 2388 state->sync_thread = kthread_create(&sync_func, 2389 (void *)state, 2390 threadname); 2391 if (IS_ERR(state->sync_thread)) { 2392 vchiq_loud_error_header(); 2393 vchiq_loud_error("couldn't create thread %s", threadname); 2394 vchiq_loud_error_footer(); 2395 ret = PTR_ERR(state->sync_thread); 2396 goto fail_free_recycle_thread; 2397 } 2398 set_user_nice(state->sync_thread, -20); 2399 2400 wake_up_process(state->slot_handler_thread); 2401 wake_up_process(state->recycle_thread); 2402 wake_up_process(state->sync_thread); 2403 2404 vchiq_states[0] = state; 2405 2406 /* Indicate readiness to the other side */ 2407 local->initialised = 1; 2408 2409 return 0; 2410 2411 fail_free_recycle_thread: 2412 kthread_stop(state->recycle_thread); 2413 fail_free_handler_thread: 2414 kthread_stop(state->slot_handler_thread); 2415 2416 return ret; 2417 } 2418 2419 void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header) 2420 { 2421 struct vchiq_service *service = find_service_by_handle(handle); 2422 int pos; 2423 2424 while (service->msg_queue_write == service->msg_queue_read + 2425 VCHIQ_MAX_SLOTS) { 2426 if (wait_for_completion_interruptible(&service->msg_queue_pop)) 2427 flush_signals(current); 2428 } 2429 2430 pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1); 2431 service->msg_queue_write++; 2432 service->msg_queue[pos] = header; 2433 2434 complete(&service->msg_queue_push); 2435 } 2436 
EXPORT_SYMBOL(vchiq_msg_queue_push);

/*
 * Pop the oldest held message header from a service's message queue.
 * Returns NULL if the queue is empty. Pairs with vchiq_msg_queue_push().
 *
 * NOTE(review): the while loop below is unreachable - the early return
 * above fires on the identical condition first - so this function never
 * actually blocks; the loop looks vestigial. TODO confirm intended
 * semantics before removing.
 * NOTE(review): service from find_service_by_handle() is used unchecked;
 * callers must pass a valid handle - TODO confirm.
 */
struct vchiq_header *vchiq_msg_hold(unsigned int handle)
{
	struct vchiq_service *service = find_service_by_handle(handle);
	struct vchiq_header *header;
	int pos;

	if (service->msg_queue_write == service->msg_queue_read)
		return NULL;

	while (service->msg_queue_write == service->msg_queue_read) {
		if (wait_for_completion_interruptible(&service->msg_queue_push))
			flush_signals(current);
	}

	pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
	service->msg_queue_read++;
	header = service->msg_queue[pos];

	/* Wake any producer blocked on a full queue. */
	complete(&service->msg_queue_pop);

	return header;
}
EXPORT_SYMBOL(vchiq_msg_hold);

/* Reject service parameters lacking a callback or a fourcc. */
static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
{
	if (!params->callback || !params->fourcc) {
		vchiq_loud_error("Can't add service, invalid params\n");
		return -EINVAL;
	}

	return 0;
}

/* Called from application thread when a client or server service is created.
 *
 * Allocates and initialises a vchiq_service, then installs it in the
 * state's service array under state->mutex:
 *  - clients (srvstate == OPENING) take the first free low slot;
 *  - servers scan from the top, refusing duplicate public fourccs owned
 *    by a different instance/callback.
 * Returns the new service with a ref_count of 1, or NULL on invalid
 * params, allocation failure, or fourcc conflict.
 */
struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
			   const struct vchiq_service_params_kernel *params,
			   int srvstate, struct vchiq_instance *instance,
			   vchiq_userdata_term userdata_term)
{
	struct vchiq_service *service;
	struct vchiq_service __rcu **pservice = NULL;
	struct vchiq_service_quota *quota;
	int ret;
	int i;

	ret = vchiq_validate_params(params);
	if (ret)
		return NULL;

	service = kmalloc(sizeof(*service), GFP_KERNEL);
	if (!service)
		return service;

	/* Every field is assigned below - kmalloc (not kzalloc) is safe
	 * only as long as that stays true. */
	service->base.fourcc = params->fourcc;
	service->base.callback = params->callback;
	service->base.userdata = params->userdata;
	service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
	kref_init(&service->ref_count);
	service->srvstate = VCHIQ_SRVSTATE_FREE;
	service->userdata_term = userdata_term;
	service->localport = VCHIQ_PORT_FREE;
	service->remoteport = VCHIQ_PORT_FREE;

	/* Clients hide their fourcc so servers cannot collide with them. */
	service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
		VCHIQ_FOURCC_INVALID : params->fourcc;
	service->client_id = 0;
	service->auto_close = 1;
	service->sync = 0;
	service->closing = 0;
	service->trace = 0;
	atomic_set(&service->poll_flags, 0);
	service->version = params->version;
	service->version_min = params->version_min;
	service->state = state;
	service->instance = instance;
	service->service_use_count = 0;
	service->msg_queue_read = 0;
	service->msg_queue_write = 0;
	init_bulk_queue(&service->bulk_tx);
	init_bulk_queue(&service->bulk_rx);
	init_completion(&service->remove_event);
	init_completion(&service->bulk_remove_event);
	init_completion(&service->msg_queue_pop);
	init_completion(&service->msg_queue_push);
	mutex_init(&service->bulk_mutex);
	memset(&service->stats, 0, sizeof(service->stats));
	memset(&service->msg_queue, 0, sizeof(service->msg_queue));

	/*
	 * Although it is perfectly possible to use a spinlock
	 * to protect the creation of services, it is overkill as it
	 * disables interrupts while the array is searched.
	 * The only danger is of another thread trying to create a
	 * service - service deletion is safe.
	 * Therefore it is preferable to use state->mutex which,
	 * although slower to claim, doesn't block interrupts while
	 * it is held.
	 */

	mutex_lock(&state->mutex);

	/* Prepare to use a previously unused service */
	if (state->unused_service < VCHIQ_MAX_SERVICES)
		pservice = &state->services[state->unused_service];

	if (srvstate == VCHIQ_SRVSTATE_OPENING) {
		/* Client: take the lowest free array slot. */
		for (i = 0; i < state->unused_service; i++) {
			if (!rcu_access_pointer(state->services[i])) {
				pservice = &state->services[i];
				break;
			}
		}
	} else {
		/* Server: scan downwards, checking for fourcc clashes. */
		rcu_read_lock();
		for (i = (state->unused_service - 1); i >= 0; i--) {
			struct vchiq_service *srv;

			srv = rcu_dereference(state->services[i]);
			if (!srv) {
				pservice = &state->services[i];
			} else if ((srv->public_fourcc == params->fourcc) &&
				   ((srv->instance != instance) ||
				   (srv->base.callback != params->callback))) {
				/*
				 * There is another server using this
				 * fourcc which doesn't match.
				 */
				pservice = NULL;
				break;
			}
		}
		rcu_read_unlock();
	}

	if (pservice) {
		service->localport = (pservice - state->services);
		/* handle_seq rolls a monotonically increasing prefix into
		 * the handle so stale handles do not match reused ports. */
		if (!handle_seq)
			handle_seq = VCHIQ_MAX_STATES *
				 VCHIQ_MAX_SERVICES;
		service->handle = handle_seq |
			(state->id * VCHIQ_MAX_SERVICES) |
			service->localport;
		handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
		rcu_assign_pointer(*pservice, service);
		if (pservice == &state->services[state->unused_service])
			state->unused_service++;
	}

	mutex_unlock(&state->mutex);

	if (!pservice) {
		kfree(service);
		return NULL;
	}

	quota = &state->service_quotas[service->localport];
	quota->slot_quota = state->default_slot_quota;
	quota->message_quota = state->default_message_quota;
	if (quota->slot_use_count == 0)
		quota->previous_tx_index =
			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
			- 1;

	/* Bring this service online */
	vchiq_set_service_state(service, srvstate);

	vchiq_log_info(vchiq_core_msg_log_level,
		       "%s Service %c%c%c%c SrcPort:%d",
		       (srvstate == VCHIQ_SRVSTATE_OPENING)
		       ? "Open" : "Add",
		       VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
		       service->localport);

	/* Don't unlock the service - leave it with a ref_count of 1. */

	return service;
}

/*
 * Send an OPEN request for the given service and block (interruptibly)
 * for the peer's ACK/NAK. On retry or rejection the use-count taken by
 * vchiq_use_service_internal() is released again.
 */
enum vchiq_status
vchiq_open_service_internal(struct vchiq_service *service, int client_id)
{
	struct vchiq_open_payload payload = {
		service->base.fourcc,
		client_id,
		service->version,
		service->version_min
	};
	enum vchiq_status status = VCHIQ_SUCCESS;

	service->client_id = client_id;
	vchiq_use_service_internal(service);
	status = queue_message(service->state,
			       NULL, MAKE_OPEN(service->localport),
			       memcpy_copy_callback,
			       &payload,
			       sizeof(payload),
			       QMFLAGS_IS_BLOCKING);

	if (status != VCHIQ_SUCCESS)
		return status;

	/* Wait for the ACK/NAK */
	if (wait_for_completion_interruptible(&service->remove_event)) {
		status = VCHIQ_RETRY;
		vchiq_release_service_internal(service);
	} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
		   (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
		/* CLOSEWAIT is the expected "peer said no" state - anything
		 * else is worth logging. */
		if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
			vchiq_log_error(vchiq_core_log_level,
					"%d: osi - srvstate = %s (ref %u)",
					service->state->id,
					srvstate_names[service->srvstate],
					kref_read(&service->ref_count));
		status = VCHIQ_ERROR;
		VCHIQ_SERVICE_STATS_INC(service, error_count);
		vchiq_release_service_internal(service);
	}

	return status;
}

/*
 * Release every claimed message slot still addressed to this service,
 * so its slots can be recycled when the service goes away.
 */
static void
release_service_messages(struct vchiq_service *service)
{
	struct vchiq_state *state = service->state;
	int slot_last = state->remote->slot_last;
	int i;

	/* Release any claimed messages aimed at this service */

	if (service->sync) {
		/* Synchronous services only ever hold the single sync slot. */
		struct vchiq_header *header =
			(struct
vchiq_header *)SLOT_DATA_FROM_INDEX(state,
						state->remote->slot_sync);
		if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
			release_message_sync(state, header);

		return;
	}

	/* Walk every remote data slot that still has unreleased messages. */
	for (i = state->remote->slot_first; i <= slot_last; i++) {
		struct vchiq_slot_info *slot_info =
			SLOT_INFO_FROM_INDEX(state, i);
		unsigned int pos, end;
		char *data;

		if (slot_info->release_count == slot_info->use_count)
			continue;

		data = (char *)SLOT_DATA_FROM_INDEX(state, i);
		end = VCHIQ_SLOT_SIZE;
		if (data == state->rx_data)
			/*
			 * This buffer is still being read from - stop
			 * at the current read position
			 */
			end = state->rx_pos & VCHIQ_SLOT_MASK;

		pos = 0;

		/* Scan headers within the slot, releasing any claimed one
		 * whose destination is this service. */
		while (pos < end) {
			struct vchiq_header *header =
				(struct vchiq_header *)(data + pos);
			int msgid = header->msgid;
			int port = VCHIQ_MSG_DSTPORT(msgid);

			if ((port == service->localport) &&
			    (msgid & VCHIQ_MSGID_CLAIMED)) {
				vchiq_log_info(vchiq_core_log_level,
					       " fsi - hdr %pK", header);
				release_slot(state, slot_info, header,
					     NULL);
			}
			pos += calc_stride(header->size);
			if (pos > VCHIQ_SLOT_SIZE) {
				/* A header claimed to extend past the slot -
				 * shared memory is corrupt. */
				vchiq_log_error(vchiq_core_log_level,
						"fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
						pos, header, msgid,
						header->msgid, header->size);
				WARN(1, "invalid slot position\n");
			}
		}
	}
}

/*
 * Abort and notify all outstanding bulk transfers on a service.
 * Returns non-zero on full success, 0 if the lock was interrupted or a
 * notify needs retrying.
 */
static int
do_abort_bulks(struct vchiq_service *service)
{
	enum vchiq_status status;

	/* Abort any outstanding bulk transfers */
	if (mutex_lock_killable(&service->bulk_mutex))
		return 0;
	abort_outstanding_bulks(service, &service->bulk_tx);
	abort_outstanding_bulks(service, &service->bulk_rx);
	mutex_unlock(&service->bulk_mutex);

	status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
	if (status != VCHIQ_SUCCESS)
		return 0;

	status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
	return (status == VCHIQ_SUCCESS);
}

/*
 * Final stage of closing a service: move it to its post-close state
 * (LISTENING/CLOSEWAIT for servers, CLOSED for clients), deliver the
 * VCHIQ_SERVICE_CLOSED callback, and drop any leftover use-counts.
 * If the callback asks for a retry, the service is parked in 'failstate'
 * instead.
 */
static enum vchiq_status
close_service_complete(struct vchiq_service *service, int failstate)
{
	enum vchiq_status status;
	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
	int newstate;

	switch (service->srvstate) {
	case VCHIQ_SRVSTATE_OPEN:
	case VCHIQ_SRVSTATE_CLOSESENT:
	case VCHIQ_SRVSTATE_CLOSERECVD:
		if (is_server) {
			if (service->auto_close) {
				service->client_id = 0;
				service->remoteport = VCHIQ_PORT_FREE;
				newstate = VCHIQ_SRVSTATE_LISTENING;
			} else {
				newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
			}
		} else {
			newstate = VCHIQ_SRVSTATE_CLOSED;
		}
		vchiq_set_service_state(service, newstate);
		break;
	case VCHIQ_SRVSTATE_LISTENING:
		break;
	default:
		vchiq_log_error(vchiq_core_log_level,
				"%s(%x) called in state %s", __func__,
				service->handle, srvstate_names[service->srvstate]);
		WARN(1, "%s in unexpected state\n", __func__);
		return VCHIQ_ERROR;
	}

	status = make_service_callback(service,
				       VCHIQ_SERVICE_CLOSED, NULL, NULL);

	if (status != VCHIQ_RETRY) {
		int uc = service->service_use_count;
		int i;
		/* Complete the close process */
		for (i = 0; i < uc; i++)
			/*
			 * cater for cases where close is forced and the
			 * client may not close all it's handles
			 */
			vchiq_release_service_internal(service);

		service->client_id = 0;
		service->remoteport = VCHIQ_PORT_FREE;

		if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
			vchiq_free_service_internal(service);
		} else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
			if (is_server)
				service->closing = 0;

			complete(&service->remove_event);
		}
	} else {
		vchiq_set_service_state(service, failstate);
	}

	return status;
}

/* Called by the slot handler
 *
 * Drive the close state machine for a service. close_recvd indicates
 * whether the peer initiated the close (a CLOSE message arrived) or we
 * did. May return VCHIQ_RETRY, in which case the caller must call again.
 *
 * Locking: in the OPEN/OPENSYNC paths the slot mutex is already held by
 * the caller (queue_message is invoked with QMFLAGS_NO_MUTEX_UNLOCK and
 * the mutex is explicitly dropped below) - the unlock pairing here is
 * deliberate and order-sensitive.
 */
enum vchiq_status
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
{
	struct vchiq_state *state = service->state;
	enum vchiq_status status = VCHIQ_SUCCESS;
	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
	int close_id = MAKE_CLOSE(service->localport,
				  VCHIQ_MSG_DSTPORT(service->remoteport));

	vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
		       service->state->id, service->localport, close_recvd,
		       srvstate_names[service->srvstate]);

	switch (service->srvstate) {
	case VCHIQ_SRVSTATE_CLOSED:
	case VCHIQ_SRVSTATE_HIDDEN:
	case VCHIQ_SRVSTATE_LISTENING:
	case VCHIQ_SRVSTATE_CLOSEWAIT:
		if (close_recvd) {
			/* Peer closed a service that is not open here. */
			vchiq_log_error(vchiq_core_log_level,
					"%s(1) called in state %s",
					__func__, srvstate_names[service->srvstate]);
		} else if (is_server) {
			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
				status = VCHIQ_ERROR;
			} else {
				service->client_id = 0;
				service->remoteport = VCHIQ_PORT_FREE;
				if (service->srvstate ==
				    VCHIQ_SRVSTATE_CLOSEWAIT)
					vchiq_set_service_state(service,
								VCHIQ_SRVSTATE_LISTENING);
			}
			complete(&service->remove_event);
		} else {
			vchiq_free_service_internal(service);
		}
		break;
	case VCHIQ_SRVSTATE_OPENING:
		if (close_recvd) {
			/* The open was rejected - tell the user */
			vchiq_set_service_state(service,
						VCHIQ_SRVSTATE_CLOSEWAIT);
			complete(&service->remove_event);
		} else {
			/* Shutdown mid-open - let the other side know */
			status = queue_message(state, service, close_id,
					       NULL, NULL, 0, 0);
		}
		break;

	case VCHIQ_SRVSTATE_OPENSYNC:
		mutex_lock(&state->sync_mutex);
		fallthrough;
	case VCHIQ_SRVSTATE_OPEN:
		if (close_recvd) {
			if (!do_abort_bulks(service))
				status = VCHIQ_RETRY;
		}

		release_service_messages(service);

		if (status == VCHIQ_SUCCESS)
			status = queue_message(state, service, close_id,
					       NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);

		if (status != VCHIQ_SUCCESS) {
			if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
				mutex_unlock(&state->sync_mutex);
			break;
		}

		if (!close_recvd) {
			/* Change the state while the mutex is still held */
			vchiq_set_service_state(service,
						VCHIQ_SRVSTATE_CLOSESENT);
			mutex_unlock(&state->slot_mutex);
			if (service->sync)
				mutex_unlock(&state->sync_mutex);
			break;
		}

		/* Change the state while the mutex is still held */
		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
		mutex_unlock(&state->slot_mutex);
		if (service->sync)
			mutex_unlock(&state->sync_mutex);

		status = close_service_complete(service,
						VCHIQ_SRVSTATE_CLOSERECVD);
		break;

	case VCHIQ_SRVSTATE_CLOSESENT:
		if (!close_recvd)
			/* This happens when a process is killed mid-close */
			break;

		if (!do_abort_bulks(service)) {
			status = VCHIQ_RETRY;
			break;
		}

		/* status is necessarily VCHIQ_SUCCESS here - the check is
		 * redundant but harmless. */
		if (status == VCHIQ_SUCCESS)
			status = close_service_complete(service,
							VCHIQ_SRVSTATE_CLOSERECVD);
		break;

	case VCHIQ_SRVSTATE_CLOSERECVD:
		if (!close_recvd && is_server)
			/* Force into LISTENING mode */
			vchiq_set_service_state(service,
						VCHIQ_SRVSTATE_LISTENING);
		status = close_service_complete(service,
						VCHIQ_SRVSTATE_CLOSERECVD);
		break;

	default:
		vchiq_log_error(vchiq_core_log_level,
				"%s(%d) called in state %s", __func__,
				close_recvd, srvstate_names[service->srvstate]);
		break;
	}

	return status;
}

/* Called from the application process upon process death */
void
vchiq_terminate_service_internal(struct vchiq_service *service)
{
	struct vchiq_state *state = service->state;

	vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
		       state->id, service->localport, service->remoteport);

	mark_service_closing(service);

	/* Mark the service for removal by the slot handler */
	request_poll(state, service, VCHIQ_POLL_REMOVE);
}

/* Called from the slot handler
 *
 * Move a quiescent service to FREE and drop the initial reference taken
 * at creation; refuses (with an error log) if the service is in any
 * active state.
 */
void
vchiq_free_service_internal(struct vchiq_service *service)
{
	struct vchiq_state *state = service->state;

	vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
		       state->id, service->localport);

	switch (service->srvstate) {
	case VCHIQ_SRVSTATE_OPENING:
	case VCHIQ_SRVSTATE_CLOSED:
	case VCHIQ_SRVSTATE_HIDDEN:
	case VCHIQ_SRVSTATE_LISTENING:
	case VCHIQ_SRVSTATE_CLOSEWAIT:
		break;
	default:
		vchiq_log_error(vchiq_core_log_level,
				"%d: fsi - (%d) in state %s",
				state->id, service->localport,
				srvstate_names[service->srvstate]);
		return;
	}

	vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);

	complete(&service->remove_event);

	/* Release the initial lock */
	vchiq_service_put(service);
}

/*
 * Bring this instance's services online and perform the CONNECT
 * handshake with the peer if not already connected. May return
 * VCHIQ_RETRY if interrupted by a signal.
 */
enum vchiq_status
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
	struct vchiq_service *service;
	int i;

	/* Find all services registered to this client and enable them.
	 */
	i = 0;
	while ((service = next_service_by_instance(state, instance,
						   &i)) != NULL) {
		if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
			vchiq_set_service_state(service,
						VCHIQ_SRVSTATE_LISTENING);
		vchiq_service_put(service);
	}

	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
		if (queue_message(state, NULL, MAKE_CONNECT, NULL, NULL,
				  0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
			return VCHIQ_RETRY;

		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
	}

	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
		if (wait_for_completion_interruptible(&state->connect))
			return VCHIQ_RETRY;

		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
		/* Re-complete so any other waiter also proceeds. */
		complete(&state->connect);
	}

	return VCHIQ_SUCCESS;
}

/* Remove every service belonging to this instance (e.g. on shutdown). */
void
vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
	struct vchiq_service *service;
	int i;

	/* Find all services registered to this client and remove them.
	 */
	i = 0;
	while ((service = next_service_by_instance(state, instance,
						   &i)) != NULL) {
		(void)vchiq_remove_service(service->handle);
		vchiq_service_put(service);
	}
}

/*
 * Public API: close an open service and wait (interruptibly) until the
 * close completes. If called from the slot handler itself, the close is
 * driven inline; otherwise it is delegated via a poll request.
 */
enum vchiq_status
vchiq_close_service(unsigned int handle)
{
	/* Unregister the service */
	struct vchiq_service *service = find_service_by_handle(handle);
	enum vchiq_status status = VCHIQ_SUCCESS;

	if (!service)
		return VCHIQ_ERROR;

	vchiq_log_info(vchiq_core_log_level,
		       "%d: close_service:%d",
		       service->state->id, service->localport);

	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
	    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
	    (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
		vchiq_service_put(service);
		return VCHIQ_ERROR;
	}

	mark_service_closing(service);

	if (current == service->state->slot_handler_thread) {
		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
		WARN_ON(status == VCHIQ_RETRY);
	} else {
		/* Mark the service for termination by the slot handler */
		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
	}

	/* Wait for the slot handler to finish the close. */
	while (1) {
		if (wait_for_completion_interruptible(&service->remove_event)) {
			status = VCHIQ_RETRY;
			break;
		}

		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
			break;

		vchiq_log_warning(vchiq_core_log_level,
				  "%d: close_service:%d - waiting in state %s",
				  service->state->id, service->localport,
				  srvstate_names[service->srvstate]);
	}

	if ((status == VCHIQ_SUCCESS) &&
	    (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
	    (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
		status = VCHIQ_ERROR;

	vchiq_service_put(service);

	return status;
}
EXPORT_SYMBOL(vchiq_close_service);

/*
 * Public API: remove (fully tear down) a service and wait for it to
 * reach FREE. Servers are temporarily disguised as clients so they do
 * not fall back into the LISTENING state.
 */
enum vchiq_status
vchiq_remove_service(unsigned int handle)
{
	/* Unregister the service */
	struct vchiq_service *service = find_service_by_handle(handle);
	enum vchiq_status status = VCHIQ_SUCCESS;

	if (!service)
		return VCHIQ_ERROR;

	vchiq_log_info(vchiq_core_log_level,
		       "%d: remove_service:%d",
		       service->state->id, service->localport);

	if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
		vchiq_service_put(service);
		return VCHIQ_ERROR;
	}

	mark_service_closing(service);

	if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
	    (current == service->state->slot_handler_thread)) {
		/*
		 * Make it look like a client, because it must be removed and
		 * not left in the LISTENING state.
		 */
		service->public_fourcc = VCHIQ_FOURCC_INVALID;

		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
		WARN_ON(status == VCHIQ_RETRY);
	} else {
		/* Mark the service for removal by the slot handler */
		request_poll(service->state, service, VCHIQ_POLL_REMOVE);
	}
	while (1) {
		if (wait_for_completion_interruptible(&service->remove_event)) {
			status = VCHIQ_RETRY;
			break;
		}

		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
			break;

		vchiq_log_warning(vchiq_core_log_level,
				  "%d: remove_service:%d - waiting in state %s",
				  service->state->id, service->localport,
				  srvstate_names[service->srvstate]);
	}

	if ((status == VCHIQ_SUCCESS) &&
	    (service->srvstate != VCHIQ_SRVSTATE_FREE))
		status = VCHIQ_ERROR;

	vchiq_service_put(service);

	return status;
}

/*
 * This function may be called by kernel threads or user threads.
 * User threads may receive VCHIQ_RETRY to indicate that a signal has been
 * received and the call should be retried after being returned to user
 * context.
 * When called in blocking mode, the userdata field points to a bulk_waiter
 * structure.
 */
enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
				      void *offset, void __user *uoffset,
				      int size, void *userdata,
				      enum vchiq_bulk_mode mode,
				      enum vchiq_bulk_dir dir)
{
	struct vchiq_service *service = find_service_by_handle(handle);
	struct vchiq_bulk_queue *queue;
	struct vchiq_bulk *bulk;
	struct vchiq_state *state;
	struct bulk_waiter *bulk_waiter = NULL;
	const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
	const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
	enum vchiq_status status = VCHIQ_ERROR;
	int payload[2];

	if (!service)
		goto error_exit;

	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
		goto error_exit;

	/* Exactly one of the kernel/user buffer pointers must be set. */
	if (!offset && !uoffset)
		goto error_exit;

	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
		goto error_exit;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		bulk_waiter = userdata;
		init_completion(&bulk_waiter->event);
		bulk_waiter->actual = 0;
		bulk_waiter->bulk = NULL;
		break;
	case VCHIQ_BULK_MODE_WAITING:
		/* Resume a previously interrupted blocking transfer. */
		bulk_waiter = userdata;
		bulk = bulk_waiter->bulk;
		goto waiting;
	default:
		goto error_exit;
	}

	state = service->state;

	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
		&service->bulk_tx : &service->bulk_rx;

	if (mutex_lock_killable(&service->bulk_mutex)) {
		status = VCHIQ_RETRY;
		goto error_exit;
	}

	/* Queue full: drop the mutex while waiting for a slot to free. */
	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
		do {
			mutex_unlock(&service->bulk_mutex);
			if (wait_for_completion_interruptible(
						&service->bulk_remove_event)) {
				status = VCHIQ_RETRY;
				goto error_exit;
			}
			if (mutex_lock_killable(&service->bulk_mutex)) {
				status = VCHIQ_RETRY;
				goto error_exit;
			}
		} while (queue->local_insert == queue->remove +
				VCHIQ_NUM_SERVICE_BULKS);
	}

	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];

	bulk->mode = mode;
	bulk->dir = dir;
	bulk->userdata = userdata;
	bulk->size = size;
	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;

	if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir))
		goto unlock_error_exit;

	/* Write barrier: bulk descriptor must be visible before the
	 * message announcing it is queued. */
	wmb();

	vchiq_log_info(vchiq_core_log_level,
		       "%d: bt (%d->%d) %cx %x@%pad %pK",
		       state->id, service->localport, service->remoteport, dir_char,
		       size, &bulk->data, userdata);

	/*
	 * The slot mutex must be held when the service is being closed, so
	 * claim it here to ensure that isn't happening
	 */
	if (mutex_lock_killable(&state->slot_mutex)) {
		status = VCHIQ_RETRY;
		goto cancel_bulk_error_exit;
	}

	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
		goto unlock_both_error_exit;

	payload[0] = lower_32_bits(bulk->data);
	payload[1] = bulk->size;
	status = queue_message(state,
			       NULL,
			       VCHIQ_MAKE_MSG(dir_msgtype,
					      service->localport,
					      service->remoteport),
			       memcpy_copy_callback,
			       &payload,
			       sizeof(payload),
			       QMFLAGS_IS_BLOCKING |
			       QMFLAGS_NO_MUTEX_LOCK |
			       QMFLAGS_NO_MUTEX_UNLOCK);
	if (status != VCHIQ_SUCCESS)
		goto unlock_both_error_exit;

	/* Only commit the insert once the message is safely queued. */
	queue->local_insert++;

	mutex_unlock(&state->slot_mutex);
	mutex_unlock(&service->bulk_mutex);

	vchiq_log_trace(vchiq_core_log_level,
			"%d: bt:%d %cx li=%x ri=%x p=%x",
			state->id,
			service->localport, dir_char,
			queue->local_insert, queue->remote_insert, queue->process);

waiting:
	vchiq_service_put(service);

	status = VCHIQ_SUCCESS;

	if (bulk_waiter) {
		bulk_waiter->bulk = bulk;
		if (wait_for_completion_interruptible(&bulk_waiter->event))
			status = VCHIQ_RETRY;
		else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
			status = VCHIQ_ERROR;
	}

	return status;

unlock_both_error_exit:
	mutex_unlock(&state->slot_mutex);
cancel_bulk_error_exit:
	vchiq_complete_bulk(bulk);
unlock_error_exit:
	mutex_unlock(&service->bulk_mutex);

error_exit:
	if (service)
		vchiq_service_put(service);
	return status;
}

/*
 * Queue a data message on an open service, pulling the payload through
 * copy_callback. Rejects zero-length and oversize messages. Dispatches
 * to the synchronous path for OPENSYNC services.
 */
enum vchiq_status
vchiq_queue_message(unsigned int handle,
		    ssize_t (*copy_callback)(void *context, void *dest,
					     size_t offset, size_t maxsize),
		    void *context,
		    size_t size)
{
	struct vchiq_service *service = find_service_by_handle(handle);
	enum vchiq_status status = VCHIQ_ERROR;
	int data_id;

	if (!service)
		goto error_exit;

	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
		goto error_exit;

	if (!size) {
		VCHIQ_SERVICE_STATS_INC(service, error_count);
		goto error_exit;

	}

	if (size > VCHIQ_MAX_MSG_SIZE) {
		VCHIQ_SERVICE_STATS_INC(service, error_count);
		goto error_exit;
	}

	data_id = MAKE_DATA(service->localport, service->remoteport);

	switch (service->srvstate) {
	case VCHIQ_SRVSTATE_OPEN:
		status = queue_message(service->state, service, data_id,
				       copy_callback, context, size, 1);
		break;
	case VCHIQ_SRVSTATE_OPENSYNC:
		status = queue_message_sync(service->state, service, data_id,
					    copy_callback, context, size, 1);
		break;
	default:
		status = VCHIQ_ERROR;
		break;
	}

error_exit:
	if (service)
		vchiq_service_put(service);

	return status;
}

/* Blocking wrapper around vchiq_queue_message(): retries on VCHIQ_RETRY
 * until the message is queued or a hard error occurs. */
int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
{
	enum vchiq_status status;

	while (1) {
		status = vchiq_queue_message(handle, memcpy_copy_callback,
					     data, size);

		/*
		 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
		 * implement a retry mechanism since this function is supposed
		 * to block until queued
		 */
		if (status != VCHIQ_RETRY)
			break;

		msleep(1);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_queue_kernel_message);

/*
 * Release a message header previously delivered to a service callback,
 * returning its slot (data or sync) to the remote side.
 */
void
vchiq_release_message(unsigned int handle,
		      struct vchiq_header *header)
{
	struct vchiq_service *service = find_service_by_handle(handle);
	struct vchiq_shared_state *remote;
	struct vchiq_state *state;
	int slot_index;

	if (!service)
		return;

	state = service->state;
	remote = state->remote;

	slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);

	if ((slot_index >= remote->slot_first) &&
	    (slot_index <= remote->slot_last)) {
		int msgid = header->msgid;

		if (msgid & VCHIQ_MSGID_CLAIMED) {
			struct vchiq_slot_info *slot_info =
				SLOT_INFO_FROM_INDEX(state, slot_index);

			release_slot(state, slot_info, header, service);
		}
	} else if (slot_index == remote->slot_sync) {
		release_message_sync(state, header);
	}

	vchiq_service_put(service);
}
EXPORT_SYMBOL(vchiq_release_message);

/* Mark the sync slot as padding and hand it back to the peer. */
static void
release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
{
	header->msgid = VCHIQ_MSGID_PADDING;
	remote_event_signal(&state->remote->sync_release);
} 3436 3437 enum vchiq_status 3438 vchiq_get_peer_version(unsigned int handle, short *peer_version) 3439 { 3440 enum vchiq_status status = VCHIQ_ERROR; 3441 struct vchiq_service *service = find_service_by_handle(handle); 3442 3443 if (!service) 3444 goto exit; 3445 3446 if (vchiq_check_service(service) != VCHIQ_SUCCESS) 3447 goto exit; 3448 3449 if (!peer_version) 3450 goto exit; 3451 3452 *peer_version = service->peer_version; 3453 status = VCHIQ_SUCCESS; 3454 3455 exit: 3456 if (service) 3457 vchiq_service_put(service); 3458 return status; 3459 } 3460 EXPORT_SYMBOL(vchiq_get_peer_version); 3461 3462 void vchiq_get_config(struct vchiq_config *config) 3463 { 3464 config->max_msg_size = VCHIQ_MAX_MSG_SIZE; 3465 config->bulk_threshold = VCHIQ_MAX_MSG_SIZE; 3466 config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS; 3467 config->max_services = VCHIQ_MAX_SERVICES; 3468 config->version = VCHIQ_VERSION; 3469 config->version_min = VCHIQ_VERSION_MIN; 3470 } 3471 3472 int 3473 vchiq_set_service_option(unsigned int handle, 3474 enum vchiq_service_option option, int value) 3475 { 3476 struct vchiq_service *service = find_service_by_handle(handle); 3477 struct vchiq_service_quota *quota; 3478 int ret = -EINVAL; 3479 3480 if (!service) 3481 return -EINVAL; 3482 3483 switch (option) { 3484 case VCHIQ_SERVICE_OPTION_AUTOCLOSE: 3485 service->auto_close = value; 3486 ret = 0; 3487 break; 3488 3489 case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: 3490 quota = &service->state->service_quotas[service->localport]; 3491 if (value == 0) 3492 value = service->state->default_slot_quota; 3493 if ((value >= quota->slot_use_count) && 3494 (value < (unsigned short)~0)) { 3495 quota->slot_quota = value; 3496 if ((value >= quota->slot_use_count) && 3497 (quota->message_quota >= quota->message_use_count)) 3498 /* 3499 * Signal the service that it may have 3500 * dropped below its quota 3501 */ 3502 complete("a->quota_event); 3503 ret = 0; 3504 } 3505 break; 3506 3507 case 
VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: 3508 quota = &service->state->service_quotas[service->localport]; 3509 if (value == 0) 3510 value = service->state->default_message_quota; 3511 if ((value >= quota->message_use_count) && 3512 (value < (unsigned short)~0)) { 3513 quota->message_quota = value; 3514 if ((value >= quota->message_use_count) && 3515 (quota->slot_quota >= quota->slot_use_count)) 3516 /* 3517 * Signal the service that it may have 3518 * dropped below its quota 3519 */ 3520 complete("a->quota_event); 3521 ret = 0; 3522 } 3523 break; 3524 3525 case VCHIQ_SERVICE_OPTION_SYNCHRONOUS: 3526 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) || 3527 (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) { 3528 service->sync = value; 3529 ret = 0; 3530 } 3531 break; 3532 3533 case VCHIQ_SERVICE_OPTION_TRACE: 3534 service->trace = value; 3535 ret = 0; 3536 break; 3537 3538 default: 3539 break; 3540 } 3541 vchiq_service_put(service); 3542 3543 return ret; 3544 } 3545 3546 static int 3547 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state, 3548 struct vchiq_shared_state *shared, const char *label) 3549 { 3550 static const char *const debug_names[] = { 3551 "<entries>", 3552 "SLOT_HANDLER_COUNT", 3553 "SLOT_HANDLER_LINE", 3554 "PARSE_LINE", 3555 "PARSE_HEADER", 3556 "PARSE_MSGID", 3557 "AWAIT_COMPLETION_LINE", 3558 "DEQUEUE_MESSAGE_LINE", 3559 "SERVICE_CALLBACK_LINE", 3560 "MSG_QUEUE_FULL_COUNT", 3561 "COMPLETION_QUEUE_FULL_COUNT" 3562 }; 3563 int i; 3564 char buf[80]; 3565 int len; 3566 int err; 3567 3568 len = scnprintf(buf, sizeof(buf), 3569 " %s: slots %d-%d tx_pos=%x recycle=%x", 3570 label, shared->slot_first, shared->slot_last, 3571 shared->tx_pos, shared->slot_queue_recycle); 3572 err = vchiq_dump(dump_context, buf, len + 1); 3573 if (err) 3574 return err; 3575 3576 len = scnprintf(buf, sizeof(buf), 3577 " Slots claimed:"); 3578 err = vchiq_dump(dump_context, buf, len + 1); 3579 if (err) 3580 return err; 3581 3582 for (i = shared->slot_first; i <= 
shared->slot_last; i++) { 3583 struct vchiq_slot_info slot_info = 3584 *SLOT_INFO_FROM_INDEX(state, i); 3585 if (slot_info.use_count != slot_info.release_count) { 3586 len = scnprintf(buf, sizeof(buf), 3587 " %d: %d/%d", i, slot_info.use_count, 3588 slot_info.release_count); 3589 err = vchiq_dump(dump_context, buf, len + 1); 3590 if (err) 3591 return err; 3592 } 3593 } 3594 3595 for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) { 3596 len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)", 3597 debug_names[i], shared->debug[i], shared->debug[i]); 3598 err = vchiq_dump(dump_context, buf, len + 1); 3599 if (err) 3600 return err; 3601 } 3602 return 0; 3603 } 3604 3605 int vchiq_dump_state(void *dump_context, struct vchiq_state *state) 3606 { 3607 char buf[80]; 3608 int len; 3609 int i; 3610 int err; 3611 3612 len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id, 3613 conn_state_names[state->conn_state]); 3614 err = vchiq_dump(dump_context, buf, len + 1); 3615 if (err) 3616 return err; 3617 3618 len = scnprintf(buf, sizeof(buf), 3619 " tx_pos=%x(@%pK), rx_pos=%x(@%pK)", 3620 state->local->tx_pos, 3621 state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK), 3622 state->rx_pos, 3623 state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK)); 3624 err = vchiq_dump(dump_context, buf, len + 1); 3625 if (err) 3626 return err; 3627 3628 len = scnprintf(buf, sizeof(buf), 3629 " Version: %d (min %d)", 3630 VCHIQ_VERSION, VCHIQ_VERSION_MIN); 3631 err = vchiq_dump(dump_context, buf, len + 1); 3632 if (err) 3633 return err; 3634 3635 if (VCHIQ_ENABLE_STATS) { 3636 len = scnprintf(buf, sizeof(buf), 3637 " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d", 3638 state->stats.ctrl_tx_count, state->stats.ctrl_rx_count, 3639 state->stats.error_count); 3640 err = vchiq_dump(dump_context, buf, len + 1); 3641 if (err) 3642 return err; 3643 } 3644 3645 len = scnprintf(buf, sizeof(buf), 3646 " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)", 3647 
((state->slot_queue_available * VCHIQ_SLOT_SIZE) - 3648 state->local_tx_pos) / VCHIQ_SLOT_SIZE, 3649 state->data_quota - state->data_use_count, 3650 state->local->slot_queue_recycle - state->slot_queue_available, 3651 state->stats.slot_stalls, state->stats.data_stalls); 3652 err = vchiq_dump(dump_context, buf, len + 1); 3653 if (err) 3654 return err; 3655 3656 err = vchiq_dump_platform_state(dump_context); 3657 if (err) 3658 return err; 3659 3660 err = vchiq_dump_shared_state(dump_context, 3661 state, 3662 state->local, 3663 "Local"); 3664 if (err) 3665 return err; 3666 err = vchiq_dump_shared_state(dump_context, 3667 state, 3668 state->remote, 3669 "Remote"); 3670 if (err) 3671 return err; 3672 3673 err = vchiq_dump_platform_instances(dump_context); 3674 if (err) 3675 return err; 3676 3677 for (i = 0; i < state->unused_service; i++) { 3678 struct vchiq_service *service = find_service_by_port(state, i); 3679 3680 if (service) { 3681 err = vchiq_dump_service_state(dump_context, service); 3682 vchiq_service_put(service); 3683 if (err) 3684 return err; 3685 } 3686 } 3687 return 0; 3688 } 3689 3690 int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service) 3691 { 3692 char buf[80]; 3693 int len; 3694 int err; 3695 unsigned int ref_count; 3696 3697 /*Don't include the lock just taken*/ 3698 ref_count = kref_read(&service->ref_count) - 1; 3699 len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)", 3700 service->localport, srvstate_names[service->srvstate], 3701 ref_count); 3702 3703 if (service->srvstate != VCHIQ_SRVSTATE_FREE) { 3704 char remoteport[30]; 3705 struct vchiq_service_quota *quota = 3706 &service->state->service_quotas[service->localport]; 3707 int fourcc = service->base.fourcc; 3708 int tx_pending, rx_pending; 3709 3710 if (service->remoteport != VCHIQ_PORT_FREE) { 3711 int len2 = scnprintf(remoteport, sizeof(remoteport), 3712 "%u", service->remoteport); 3713 3714 if (service->public_fourcc != VCHIQ_FOURCC_INVALID) 3715 
scnprintf(remoteport + len2, 3716 sizeof(remoteport) - len2, 3717 " (client %x)", service->client_id); 3718 } else { 3719 strscpy(remoteport, "n/a", sizeof(remoteport)); 3720 } 3721 3722 len += scnprintf(buf + len, sizeof(buf) - len, 3723 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)", 3724 VCHIQ_FOURCC_AS_4CHARS(fourcc), 3725 remoteport, 3726 quota->message_use_count, 3727 quota->message_quota, 3728 quota->slot_use_count, 3729 quota->slot_quota); 3730 3731 err = vchiq_dump(dump_context, buf, len + 1); 3732 if (err) 3733 return err; 3734 3735 tx_pending = service->bulk_tx.local_insert - 3736 service->bulk_tx.remote_insert; 3737 3738 rx_pending = service->bulk_rx.local_insert - 3739 service->bulk_rx.remote_insert; 3740 3741 len = scnprintf(buf, sizeof(buf), 3742 " Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)", 3743 tx_pending, 3744 tx_pending ? service->bulk_tx.bulks[ 3745 BULK_INDEX(service->bulk_tx.remove)].size : 0, 3746 rx_pending, 3747 rx_pending ? service->bulk_rx.bulks[ 3748 BULK_INDEX(service->bulk_rx.remove)].size : 0); 3749 3750 if (VCHIQ_ENABLE_STATS) { 3751 err = vchiq_dump(dump_context, buf, len + 1); 3752 if (err) 3753 return err; 3754 3755 len = scnprintf(buf, sizeof(buf), 3756 " Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu", 3757 service->stats.ctrl_tx_count, 3758 service->stats.ctrl_tx_bytes, 3759 service->stats.ctrl_rx_count, 3760 service->stats.ctrl_rx_bytes); 3761 err = vchiq_dump(dump_context, buf, len + 1); 3762 if (err) 3763 return err; 3764 3765 len = scnprintf(buf, sizeof(buf), 3766 " Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu", 3767 service->stats.bulk_tx_count, 3768 service->stats.bulk_tx_bytes, 3769 service->stats.bulk_rx_count, 3770 service->stats.bulk_rx_bytes); 3771 err = vchiq_dump(dump_context, buf, len + 1); 3772 if (err) 3773 return err; 3774 3775 len = scnprintf(buf, sizeof(buf), 3776 " %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors", 3777 
service->stats.quota_stalls, 3778 service->stats.slot_stalls, 3779 service->stats.bulk_stalls, 3780 service->stats.bulk_aborted_count, 3781 service->stats.error_count); 3782 } 3783 } 3784 3785 err = vchiq_dump(dump_context, buf, len + 1); 3786 if (err) 3787 return err; 3788 3789 if (service->srvstate != VCHIQ_SRVSTATE_FREE) 3790 err = vchiq_dump_platform_service_state(dump_context, service); 3791 return err; 3792 } 3793 3794 void 3795 vchiq_loud_error_header(void) 3796 { 3797 vchiq_log_error(vchiq_core_log_level, 3798 "============================================================================"); 3799 vchiq_log_error(vchiq_core_log_level, 3800 "============================================================================"); 3801 vchiq_log_error(vchiq_core_log_level, "====="); 3802 } 3803 3804 void 3805 vchiq_loud_error_footer(void) 3806 { 3807 vchiq_log_error(vchiq_core_log_level, "====="); 3808 vchiq_log_error(vchiq_core_log_level, 3809 "============================================================================"); 3810 vchiq_log_error(vchiq_core_log_level, 3811 "============================================================================"); 3812 } 3813 3814 enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state) 3815 { 3816 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) 3817 return VCHIQ_RETRY; 3818 3819 return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0); 3820 } 3821 3822 enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state) 3823 { 3824 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) 3825 return VCHIQ_RETRY; 3826 3827 return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE, 3828 NULL, NULL, 0, 0); 3829 } 3830 3831 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, 3832 size_t num_bytes) 3833 { 3834 const u8 *mem = void_mem; 3835 size_t offset; 3836 char line_buf[100]; 3837 char *s; 3838 3839 while (num_bytes > 0) { 3840 s = line_buf; 3841 3842 for (offset = 0; offset < 16; 
offset++) { 3843 if (offset < num_bytes) 3844 s += scnprintf(s, 4, "%02x ", mem[offset]); 3845 else 3846 s += scnprintf(s, 4, " "); 3847 } 3848 3849 for (offset = 0; offset < 16; offset++) { 3850 if (offset < num_bytes) { 3851 u8 ch = mem[offset]; 3852 3853 if ((ch < ' ') || (ch > '~')) 3854 ch = '.'; 3855 *s++ = (char)ch; 3856 } 3857 } 3858 *s++ = '\0'; 3859 3860 if (label && (*label != '\0')) 3861 vchiq_log_trace(VCHIQ_LOG_TRACE, 3862 "%s: %08x: %s", label, addr, line_buf); 3863 else 3864 vchiq_log_trace(VCHIQ_LOG_TRACE, 3865 "%08x: %s", addr, line_buf); 3866 3867 addr += 16; 3868 mem += 16; 3869 if (num_bytes > 16) 3870 num_bytes -= 16; 3871 else 3872 num_bytes = 0; 3873 } 3874 } 3875