/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) channel support.
 *
 *	This is the part of XPC that manages the channels and
 *	sends/receives messages across them to/from other partitions.
 *
 */

#include <linux/device.h>
#include "xpc.h"

/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() expects to be called with ch->lock held
 * (via spin_lock_irqsave()) and will leave it locked upon return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_setup_msg_structures(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);

		ch->flags |= XPC_C_SETUP;

		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
			return;
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_send_chctl_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	spin_unlock_irqrestore(&ch->lock, *irq_flags);
	xpc_create_kthreads(ch, 1, 0);
	spin_lock_irqsave(&ch->lock, *irq_flags);
}
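
/*
 * For orientation, a rough sketch of the open handshake driven by
 * xpc_process_connect() above, as derived from the flag checks in this
 * file (a summary, not an authoritative protocol description):
 *
 *	local ch->flags				chctl traffic
 *	---------------				-------------
 *	XPC_C_OPENREQUEST | XPC_C_CONNECTING	OPENREQUEST sent
 *	XPC_C_ROPENREQUEST			OPENREQUEST received
 *	XPC_C_SETUP				message structures allocated
 *	XPC_C_OPENREPLY				OPENREPLY sent
 *	XPC_C_ROPENREPLY			OPENREPLY received
 *	XPC_C_CONNECTED | XPC_C_SETUP		connected; a kthread is
 *						created to service the channel
 */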
/*
 * The channel's lock (ch->lock) is expected to be held on entry.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->kthreads_assigned) > 0 ||
	    atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_partition_engaged(ch->partid))
			return;

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_send_chctl_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
	}

	/* wake those waiting for notify completion */
	if (atomic_read(&ch->n_to_notify) > 0) {
		/* we do callout while holding ch->lock, callout can't block */
		xpc_notify_senders_of_disconnect(ch);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	/* it's now safe to free the channel's message queues */
	xpc_teardown_msg_structures(ch);

	ch->func = NULL;
	ch->key = NULL;
	ch->entry_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	/*
	 * Mark the channel disconnected and clear all other flags, including
	 * XPC_C_SETUP (because of call to xpc_teardown_msg_structures()) but
	 * not including XPC_C_WDISCONNECT (if it was set).
	 */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			 "reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* we won't lose the CPU since we're holding ch->lock */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_chctl_flags) {
		if (part->act_state != XPC_P_AS_DEACTIVATING) {
			/* time to take action on any delayed chctl flags */
			spin_lock(&part->chctl_lock);
			part->chctl.flags[ch->number] |=
			    ch->delayed_chctl_flags;
			spin_unlock(&part->chctl_lock);
		}
		ch->delayed_chctl_flags = 0;
	}
}
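
/*
 * Likewise, a sketch of the close sequence handled by
 * xpc_process_disconnect() above (again derived from the code, not a
 * normative spec): once XPC_C_DISCONNECTING is set, both sides exchange
 * CLOSEREQUEST and CLOSEREPLY (unless the partition is deactivating, in
 * which case we only wait for the other side to disengage), pending notify
 * callouts are flushed, the message queues are torn down, and the channel's
 * state is reset to XPC_C_DISCONNECTED (preserving only XPC_C_WDISCONNECT).
 */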
/*
 * Process a change in the channel's remote connection state.
 */
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
				  u8 chctl_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
	    &part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xp_retval reason;

	spin_lock_irqsave(&ch->lock, irq_flags);

again:

	if ((ch->flags & XPC_C_DISCONNECTED) &&
	    (ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing chctl flags until thread waiting disconnect
		 * has had a chance to see that the channel is disconnected.
		 */
		ch->delayed_chctl_flags |= chctl_flags;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the chctl_flags.
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
			chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
			goto again;
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
				if (part->chctl.flags[ch_number] &
				    XPC_CHCTL_OPENREQUEST) {

					DBUG_ON(ch->delayed_chctl_flags != 0);
					spin_lock(&part->chctl_lock);
					part->chctl.flags[ch_number] |=
					    XPC_CHCTL_CLOSEREQUEST;
					spin_unlock(&part->chctl_lock);
				}
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *      reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			reason = args->reason;
			if (reason <= xpSuccess || reason > xpUnknownReason)
				reason = xpUnknownReason;
			else if (reason == xpUnregistering)
				reason = xpOtherUnregistering;

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

			DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		xpc_process_disconnect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
			"%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			if (part->chctl.flags[ch_number] &
			    XPC_CHCTL_CLOSEREQUEST) {

				DBUG_ON(ch->delayed_chctl_flags != 0);
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch_number] |=
				    XPC_CHCTL_CLOSEREPLY;
				spin_unlock(&part->chctl_lock);
			}
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (chctl_flags & XPC_CHCTL_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->entry_size, args->local_nentries,
			ch->partid, ch->number);

		if (part->act_state == XPC_P_AS_DEACTIVATING ||
		    (ch->flags & XPC_C_ROPENREQUEST)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
			ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
				       XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
				     XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *      entry_size = size of channel's messages in bytes
		 *      local_nentries = remote partition's local_nentries
		 */
		if (args->entry_size == 0 || args->local_nentries == 0) {
			/* assume OPENREQUEST was delayed by mistake */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			if (args->entry_size != ch->entry_size) {
				XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}
		} else {
			ch->entry_size = args->entry_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
			"0x%lx, local_nentries=%d, remote_nentries=%d) "
			"received from partid=%d, channel=%d\n",
			args->local_msgqueue_pa, args->local_nentries,
			args->remote_nentries, ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		if (!(ch->flags & XPC_C_OPENREQUEST)) {
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *      local_msgqueue_pa = physical address of remote
		 *                          partition's local_msgqueue
		 *      local_nentries = remote partition's local_nentries
		 *      remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ch->flags |= XPC_C_ROPENREPLY;
		xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);

		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

/*
 * Attempt to establish a channel connection to a remote partition.
 */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	if (mutex_trylock(&registration->mutex) == 0)
		return xpRetry;

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->entry_size != ch->entry_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to up the registration sema before we unlock
			 * the channel lock. But that's okay here because we're
			 * done with the part that required the registration
			 * sema. XPC_DISCONNECT_CHANNEL() requires that the
			 * channel lock be locked and will unlock and relock
			 * the channel lock as needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpUnequalMsgSizes;
		}
	} else {
		ch->entry_size = registration->entry_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_send_chctl_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}
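
/*
 * Note that xpc_registrations[] is filled in by xpc_connect() (see
 * xp_main.c), which is how a kernel-level user requests the channel
 * connection attempted above.  A hypothetical registerer might do the
 * following (an illustrative sketch only; the channel number, payload
 * size, limits, and my_channel_func() are made-up names/values):
 *
 *	ret = xpc_connect(MY_CH_NUMBER, my_channel_func, NULL,
 *			  MY_PAYLOAD_SIZE, MY_NENTRIES,
 *			  MY_ASSIGNED_LIMIT, MY_IDLE_LIMIT);
 *	if (ret != xpSuccess)
 *		(handle the failure)
 */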
void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	chctl.all_flags = xpc_get_chctl_all_flags(part);

	/*
	 * Initiate channel connections for registered channels.
	 *
	 * For each connected channel that has pending messages activate idle
	 * kthreads and/or create new kthreads as needed.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related chctl flags, and then deal
		 * with connecting or disconnecting the channel as required.
		 */

		if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
			xpc_process_openclose_chctl_flags(part, ch_number,
							chctl.flags[ch_number]);
		}

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			continue;

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				(void)xpc_connect_channel(ch);
			} else {
				spin_lock_irqsave(&ch->lock, irq_flags);
				xpc_process_connect(ch, &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			continue;
		}

		/*
		 * Process any message related chctl flags, this may involve
		 * the activation of kthreads to deliver any pending messages
		 * sent from the other partition.
		 */

		if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
			xpc_process_msg_chctl_flags(part, ch_number);
	}
}

/*
 * XPC's heartbeat code calls this function to inform XPC that a partition is
 * going down.  XPC responds by tearing down the XPartition Communication
 * infrastructure used for the just downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
 * at the same time. Nor will it ever make multiple calls to either function
 * at the same time.
 */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;

	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}

	/* disconnect channels associated with the partition going down */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}

/*
 * Called by XP at the time of channel connection registration to cause
 * XPC to establish connections to all currently active partitions.
 */
void
xpc_initiate_connect(int ch_number)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];

			/*
			 * Initiate the establishment of a connection on the
			 * newly registered channel to the remote partition.
			 */
			xpc_wakeup_channel_mgr(part);
			xpc_part_deref(part);
		}
	}
}

void
xpc_connected_callout(struct xpc_channel *ch)
{
	/* let the registerer know that a connection has been established */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		ch->func(xpConnected, ch->partid, ch->number,
			 (void *)(u64)ch->local_nentries, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);
	}
}

/*
 * Called by XP at the time of channel connection unregistration to cause
 * XPC to tear down all current connections for the specified channel.
 *
 * Before returning xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed/torn down. So the caller can be
 * assured that they will not be receiving any more callouts from XPC to the
 * function they registered via xpc_connect().
 *
 * Arguments:
 *
 *	ch_number - channel # to unregister.
 */
void
xpc_initiate_disconnect(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	/* initiate the channel disconnect for every active partition */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];
			xpc_msgqueue_ref(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);

			if (!(ch->flags & XPC_C_DISCONNECTED)) {
				ch->flags |= XPC_C_WDISCONNECT;

				XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
						       &irq_flags);
			}

			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);
		}
	}

	xpc_disconnect_wait(ch_number);
}
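
/*
 * The usual entry point to the teardown above is xp's xpc_disconnect()
 * wrapper (see xp.h/xp_main.c); a registerer that is done with a channel
 * simply does (hypothetical channel number, illustrative only):
 *
 *	xpc_disconnect(MY_CH_NUMBER);
 *
 * after which no further callouts will be made to its registered function.
 */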
/*
 * Disconnect a channel and reflect the disconnect back to all who may be
 * waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}

void
xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
	/*
	 * Let the channel's registerer know that the channel is being
	 * disconnected. We don't want to do this if the registerer was never
	 * informed of a connection being made.
	 */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);

		ch->func(reason, ch->partid, ch->number, NULL, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);
	}
}

/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xp_retval ret;

	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpInterrupted);
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpInterrupted);
	} else if (ret == 0) {
		ret = xpTimeout;
	} else {
		ret = xpInterrupted;
	}

	return ret;
}
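
/*
 * A sketch of how a send path might use xpc_allocate_msg_wait() (pseudo-
 * code, hypothetical; the actual callers are the architecture-specific
 * xpc_send_payload() implementations):
 *
 *	while (no free entry in the local message queue) {
 *		if (flags & XPC_NOWAIT)
 *			return xpNoWait;
 *		ret = xpc_allocate_msg_wait(ch);
 *		if (ret != xpInterrupted && ret != xpTimeout)
 *			return ret;	(channel is disconnecting)
 *	}
 */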
/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * Once sent, this routine will not wait for the message to be received, nor
 * will notification be given when it does happen.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 */
enum xp_retval
xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
		  u16 payload_size)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_send_payload(&part->channels[ch_number], flags,
				       payload, payload_size, 0, NULL, NULL);
		xpc_part_deref(part);
	}

	return ret;
}

/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * This routine will not wait for the message to be sent or received.
 *
 * Once the remote end of the channel has received the message, the function
 * passed as an argument to xpc_initiate_send_notify() will be called. This
 * allows the sender to free up or re-use any buffers referenced by the
 * message, but does NOT mean the message has been processed at the remote
 * end by a receiver.
 *
 * If this routine returns an error, the caller's function will NOT be called.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 *	func - function to call with asynchronous notification of message
 *	       receipt. THIS FUNCTION MUST BE NON-BLOCKING.
 *	key - user-defined key to be passed to the function when it's called.
 */
enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
			 u16 payload_size, xpc_notify_func func, void *key)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);
	DBUG_ON(func == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_send_payload(&part->channels[ch_number], flags,
				       payload, payload_size, XPC_N_CALL, func,
				       key);
		xpc_part_deref(part);
	}
	return ret;
}
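
/*
 * A hypothetical notify function suitable for xpc_initiate_send_notify()
 * (illustrative only; my_notify_func(), my_buf_free(), and the use of key
 * are made-up).  It runs asynchronously and must not block:
 *
 *	static void
 *	my_notify_func(enum xp_retval reason, short partid, int ch_number,
 *		       void *key)
 *	{
 *		(xpMsgDelivered means the remote partition received the
 *		 message; any other reason reports a failure or disconnect.
 *		 Either way the buffer tracked via key may now be reused.)
 *		my_buf_free(key);
 *	}
 */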
/*
 * Deliver a message's payload to its intended recipient.
 */
void
xpc_deliver_payload(struct xpc_channel *ch)
{
	void *payload;

	payload = xpc_get_deliverable_payload(ch);
	if (payload != NULL) {

		/*
		 * This ref is taken to protect the payload itself from being
		 * freed before the user is finished with it, which the user
		 * indicates by calling xpc_initiate_received().
		 */
		xpc_msgqueue_ref(ch);

		atomic_inc(&ch->kthreads_active);

		if (ch->func != NULL) {
			dev_dbg(xpc_chan, "ch->func() called, payload=0x%p "
				"partid=%d channel=%d\n", payload, ch->partid,
				ch->number);

			/* deliver the message to its intended recipient */
			ch->func(xpMsgReceived, ch->partid, ch->number, payload,
				 ch->key);

			dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p "
				"partid=%d channel=%d\n", payload, ch->partid,
				ch->number);
		}

		atomic_dec(&ch->kthreads_active);
	}
}

/*
 * Acknowledge receipt of a delivered message's payload.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # message received on.
 *	payload - pointer to the payload area allocated via
 *	          xpc_initiate_send() or xpc_initiate_send_notify().
 */
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	ch = &part->channels[ch_number];
	xpc_received_payload(ch, payload);

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
	xpc_msgqueue_deref(ch);
}
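
/*
 * For completeness, a sketch of the receiving side as seen by a registerer
 * (hypothetical names; illustrative only).  The payload handed to ch->func
 * by xpc_deliver_payload() must eventually be acknowledged through xp's
 * xpc_received() wrapper, which lands in xpc_initiate_received() above so
 * the message entry can be reused:
 *
 *	static void
 *	my_channel_func(enum xp_retval reason, short partid, int ch_number,
 *			void *data, void *key)
 *	{
 *		if (reason == xpMsgReceived) {
 *			my_process_payload(data);
 *			xpc_received(partid, ch_number, data);
 *		}
 *		(other reasons include xpConnected and the various
 *		 disconnect reasons, e.g. xpDisconnecting)
 *	}
 */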