/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) channel support.
 *
 *      This is the part of XPC that manages the channels and
 *      sends/receives messages across them to/from other partitions.
 *
 */

#include <linux/device.h>
#include "xpc.h"

/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() is expecting to be called with the
 * spin_lock_irqsave held and will leave it locked upon return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
        enum xp_retval ret;

        DBUG_ON(!spin_is_locked(&ch->lock));

        if (!(ch->flags & XPC_C_OPENREQUEST) ||
            !(ch->flags & XPC_C_ROPENREQUEST)) {
                /* nothing more to do for now */
                return;
        }
        DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

        if (!(ch->flags & XPC_C_SETUP)) {
                spin_unlock_irqrestore(&ch->lock, *irq_flags);
                ret = xpc_setup_msg_structures(ch);
                spin_lock_irqsave(&ch->lock, *irq_flags);

                if (ret != xpSuccess)
                        XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);

                ch->flags |= XPC_C_SETUP;

                if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
                        return;

                DBUG_ON(ch->local_msgqueue == NULL);
                DBUG_ON(ch->remote_msgqueue == NULL);
        }

        if (!(ch->flags & XPC_C_OPENREPLY)) {
                ch->flags |= XPC_C_OPENREPLY;
                xpc_send_chctl_openreply(ch, irq_flags);
        }

        if (!(ch->flags & XPC_C_ROPENREPLY))
                return;

        ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);    /* clear all else */

        dev_info(xpc_chan, "channel %d to partition %d connected\n",
                 ch->number, ch->partid);

        spin_unlock_irqrestore(&ch->lock, *irq_flags);
        xpc_create_kthreads(ch, 1, 0);
        spin_lock_irqsave(&ch->lock, *irq_flags);
}
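
/*
 * For reference, a sketch of the open handshake driven by
 * xpc_process_connect() and xpc_process_openclose_chctl_flags() below. This
 * summarizes the flag protocol implemented in this file and is illustrative
 * rather than authoritative:
 *
 *      side A                          side B
 *      ------                          ------
 *      sends OPENREQUEST ------->      sets XPC_C_ROPENREQUEST
 *      sets XPC_C_ROPENREQUEST <-      sends OPENREQUEST
 *      sends OPENREPLY --------->      sets XPC_C_ROPENREPLY
 *      sets XPC_C_ROPENREPLY <---      sends OPENREPLY
 *
 * A side marks the channel XPC_C_CONNECTED only once its own OPENREQUEST and
 * OPENREPLY have been sent and the remote equivalents (XPC_C_ROPENREQUEST
 * and XPC_C_ROPENREPLY) have been received.
 */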

/*
 * spin_lock_irqsave() is expected to be held on entry.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
        struct xpc_partition *part = &xpc_partitions[ch->partid];
        u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

        DBUG_ON(!spin_is_locked(&ch->lock));

        if (!(ch->flags & XPC_C_DISCONNECTING))
                return;

        DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

        /* make sure all activity has settled down first */

        if (atomic_read(&ch->kthreads_assigned) > 0 ||
            atomic_read(&ch->references) > 0) {
                return;
        }
        DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
                !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

        if (part->act_state == XPC_P_AS_DEACTIVATING) {
                /* can't proceed until the other side disengages from us */
                if (xpc_partition_engaged(ch->partid))
                        return;

        } else {

                /* as long as the other side is up do the full protocol */

                if (!(ch->flags & XPC_C_RCLOSEREQUEST))
                        return;

                if (!(ch->flags & XPC_C_CLOSEREPLY)) {
                        ch->flags |= XPC_C_CLOSEREPLY;
                        xpc_send_chctl_closereply(ch, irq_flags);
                }

                if (!(ch->flags & XPC_C_RCLOSEREPLY))
                        return;
        }

        /* wake those waiting for notify completion */
        if (atomic_read(&ch->n_to_notify) > 0) {
                /* we do callout while holding ch->lock, callout can't block */
                xpc_notify_senders_of_disconnect(ch);
        }

        /* both sides are disconnected now */

        if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
                spin_unlock_irqrestore(&ch->lock, *irq_flags);
                xpc_disconnect_callout(ch, xpDisconnected);
                spin_lock_irqsave(&ch->lock, *irq_flags);
        }

        DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

        /* it's now safe to free the channel's message queues */
        xpc_teardown_msg_structures(ch);

        ch->func = NULL;
        ch->key = NULL;
        ch->entry_size = 0;
        ch->local_nentries = 0;
        ch->remote_nentries = 0;
        ch->kthreads_assigned_limit = 0;
        ch->kthreads_idle_limit = 0;

        /*
         * Mark the channel disconnected and clear all other flags, including
         * XPC_C_SETUP (because of call to xpc_teardown_msg_structures()) but
         * not including XPC_C_WDISCONNECT (if it was set).
         */
        ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

        atomic_dec(&part->nchannels_active);

        if (channel_was_connected) {
                dev_info(xpc_chan, "channel %d to partition %d disconnected, "
                         "reason=%d\n", ch->number, ch->partid, ch->reason);
        }

        if (ch->flags & XPC_C_WDISCONNECT) {
                /* we won't lose the CPU since we're holding ch->lock */
                complete(&ch->wdisconnect_wait);
        } else if (ch->delayed_chctl_flags) {
                if (part->act_state != XPC_P_AS_DEACTIVATING) {
                        /* time to take action on any delayed chctl flags */
                        spin_lock(&part->chctl_lock);
                        part->chctl.flags[ch->number] |=
                            ch->delayed_chctl_flags;
                        spin_unlock(&part->chctl_lock);
                }
                ch->delayed_chctl_flags = 0;
        }
}
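
/*
 * The close handshake handled above mirrors the open handshake (again, a
 * summary of the flag protocol in this file, not authoritative
 * documentation): each side sends CLOSEREQUEST, answers the remote's
 * CLOSEREQUEST with a CLOSEREPLY once its own CLOSEREQUEST has been sent,
 * and tears the channel down only when CLOSEREQUEST, RCLOSEREQUEST,
 * CLOSEREPLY and RCLOSEREPLY are all set. The one exception is a
 * deactivating partition, where it is enough for the remote side to have
 * disengaged.
 */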

/*
 * Process a change in the channel's remote connection state.
 */
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
                                  u8 chctl_flags)
{
        unsigned long irq_flags;
        struct xpc_openclose_args *args =
            &part->remote_openclose_args[ch_number];
        struct xpc_channel *ch = &part->channels[ch_number];
        enum xp_retval reason;

        spin_lock_irqsave(&ch->lock, irq_flags);

again:

        if ((ch->flags & XPC_C_DISCONNECTED) &&
            (ch->flags & XPC_C_WDISCONNECT)) {
                /*
                 * Delay processing chctl flags until the thread waiting on
                 * the disconnect has had a chance to see that the channel is
                 * disconnected.
                 */
                ch->delayed_chctl_flags |= chctl_flags;
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                return;
        }

        if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {

                dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
                        "from partid=%d, channel=%d\n", args->reason,
                        ch->partid, ch->number);

                /*
                 * If RCLOSEREQUEST is set, we're probably waiting for
                 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
                 * with this RCLOSEREQUEST in the chctl_flags.
                 */

                if (ch->flags & XPC_C_RCLOSEREQUEST) {
                        DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
                        DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
                        DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
                        DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

                        DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
                        chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
                        ch->flags |= XPC_C_RCLOSEREPLY;

                        /* both sides have finished disconnecting */
                        xpc_process_disconnect(ch, &irq_flags);
                        DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
                        goto again;
                }

                if (ch->flags & XPC_C_DISCONNECTED) {
                        if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
                                if (part->chctl.flags[ch_number] &
                                    XPC_CHCTL_OPENREQUEST) {

                                        DBUG_ON(ch->delayed_chctl_flags != 0);
                                        spin_lock(&part->chctl_lock);
                                        part->chctl.flags[ch_number] |=
                                            XPC_CHCTL_CLOSEREQUEST;
                                        spin_unlock(&part->chctl_lock);
                                }
                                spin_unlock_irqrestore(&ch->lock, irq_flags);
                                return;
                        }

                        XPC_SET_REASON(ch, 0, 0);
                        ch->flags &= ~XPC_C_DISCONNECTED;

                        atomic_inc(&part->nchannels_active);
                        ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
                }

                chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY);

                /*
                 * The meaningful CLOSEREQUEST connection state fields are:
                 *      reason = reason connection is to be closed
                 */

                ch->flags |= XPC_C_RCLOSEREQUEST;

                if (!(ch->flags & XPC_C_DISCONNECTING)) {
                        reason = args->reason;
                        if (reason <= xpSuccess || reason > xpUnknownReason)
                                reason = xpUnknownReason;
                        else if (reason == xpUnregistering)
                                reason = xpOtherUnregistering;

                        XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

                        DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return;
                }

                xpc_process_disconnect(ch, &irq_flags);
        }

        if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {

                dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
                        "%d, channel=%d\n", ch->partid, ch->number);

                if (ch->flags & XPC_C_DISCONNECTED) {
                        DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return;
                }

                DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

                if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
                        if (part->chctl.flags[ch_number] &
                            XPC_CHCTL_CLOSEREQUEST) {

                                DBUG_ON(ch->delayed_chctl_flags != 0);
                                spin_lock(&part->chctl_lock);
                                part->chctl.flags[ch_number] |=
                                    XPC_CHCTL_CLOSEREPLY;
                                spin_unlock(&part->chctl_lock);
                        }
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return;
                }

                ch->flags |= XPC_C_RCLOSEREPLY;

                if (ch->flags & XPC_C_CLOSEREPLY) {
                        /* both sides have finished disconnecting */
                        xpc_process_disconnect(ch, &irq_flags);
                }
        }

        if (chctl_flags & XPC_CHCTL_OPENREQUEST) {

                dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
                        "local_nentries=%d) received from partid=%d, "
                        "channel=%d\n", args->entry_size, args->local_nentries,
                        ch->partid, ch->number);

                if (part->act_state == XPC_P_AS_DEACTIVATING ||
                    (ch->flags & XPC_C_ROPENREQUEST)) {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return;
                }

                if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
                        ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return;
                }
                DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
                                       XPC_C_OPENREQUEST)));
                DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
                                     XPC_C_OPENREPLY | XPC_C_CONNECTED));

                /*
                 * The meaningful OPENREQUEST connection state fields are:
                 *      entry_size = size of channel's messages in bytes
                 *      local_nentries = remote partition's local_nentries
                 */
                if (args->entry_size == 0 || args->local_nentries == 0) {
                        /* assume OPENREQUEST was delayed by mistake */
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return;
                }

                ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
                ch->remote_nentries = args->local_nentries;

                if (ch->flags & XPC_C_OPENREQUEST) {
                        if (args->entry_size != ch->entry_size) {
                                XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
                                                       &irq_flags);
                                spin_unlock_irqrestore(&ch->lock, irq_flags);
                                return;
                        }
                } else {
                        ch->entry_size = args->entry_size;

                        XPC_SET_REASON(ch, 0, 0);
                        ch->flags &= ~XPC_C_DISCONNECTED;

                        atomic_inc(&part->nchannels_active);
                }

                xpc_process_connect(ch, &irq_flags);
        }

        if (chctl_flags & XPC_CHCTL_OPENREPLY) {

                dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
                        "0x%lx, local_nentries=%d, remote_nentries=%d) "
                        "received from partid=%d, channel=%d\n",
                        args->local_msgqueue_pa, args->local_nentries,
                        args->remote_nentries, ch->partid, ch->number);

                if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return;
                }
                if (!(ch->flags & XPC_C_OPENREQUEST)) {
                        XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
                                               &irq_flags);
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return;
                }

                DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
                DBUG_ON(ch->flags & XPC_C_CONNECTED);

                /*
                 * The meaningful OPENREPLY connection state fields are:
                 *      local_msgqueue_pa = physical address of remote
                 *                          partition's local_msgqueue
                 *      local_nentries = remote partition's local_nentries
                 *      remote_nentries = remote partition's remote_nentries
                 */
                DBUG_ON(args->local_msgqueue_pa == 0);
                DBUG_ON(args->local_nentries == 0);
                DBUG_ON(args->remote_nentries == 0);

                ch->flags |= XPC_C_ROPENREPLY;
                xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);

                if (args->local_nentries < ch->remote_nentries) {
                        dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
                                "remote_nentries=%d, old remote_nentries=%d, "
                                "partid=%d, channel=%d\n",
                                args->local_nentries, ch->remote_nentries,
                                ch->partid, ch->number);

                        ch->remote_nentries = args->local_nentries;
                }
                if (args->remote_nentries < ch->local_nentries) {
                        dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
                                "local_nentries=%d, old local_nentries=%d, "
                                "partid=%d, channel=%d\n",
                                args->remote_nentries, ch->local_nentries,
                                ch->partid, ch->number);

                        ch->local_nentries = args->remote_nentries;
                }

                xpc_process_connect(ch, &irq_flags);
        }

        spin_unlock_irqrestore(&ch->lock, irq_flags);
}
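
/*
 * Worked example (hypothetical): if the remote side closed and immediately
 * reopened a channel, a single chctl_flags byte can arrive carrying
 * CLOSEREQUEST, CLOSEREPLY and OPENREQUEST together. The CLOSEREQUEST block
 * above then consumes the CLOSEREPLY bit, completes the disconnect via
 * xpc_process_disconnect(), and does "goto again" so that the remaining
 * OPENREQUEST bit is processed against the now DISCONNECTED channel.
 */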

/*
 * Attempt to establish a channel connection to a remote partition.
 */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
        unsigned long irq_flags;
        struct xpc_registration *registration = &xpc_registrations[ch->number];

        if (mutex_trylock(&registration->mutex) == 0)
                return xpRetry;

        if (!XPC_CHANNEL_REGISTERED(ch->number)) {
                mutex_unlock(&registration->mutex);
                return xpUnregistered;
        }

        spin_lock_irqsave(&ch->lock, irq_flags);

        DBUG_ON(ch->flags & XPC_C_CONNECTED);
        DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

        if (ch->flags & XPC_C_DISCONNECTING) {
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                mutex_unlock(&registration->mutex);
                return ch->reason;
        }

        /* add info from the channel connect registration to the channel */

        ch->kthreads_assigned_limit = registration->assigned_limit;
        ch->kthreads_idle_limit = registration->idle_limit;
        DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
        DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
        DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

        ch->func = registration->func;
        DBUG_ON(registration->func == NULL);
        ch->key = registration->key;

        ch->local_nentries = registration->nentries;

        if (ch->flags & XPC_C_ROPENREQUEST) {
                if (registration->entry_size != ch->entry_size) {
                        /* the local and remote sides aren't the same */

                        /*
                         * Because XPC_DISCONNECT_CHANNEL() can block we're
                         * forced to release the registration mutex before we
                         * unlock the channel lock. But that's okay here
                         * because we're done with the part that required the
                         * registration mutex. XPC_DISCONNECT_CHANNEL()
                         * requires that the channel lock be locked and will
                         * unlock and relock the channel lock as needed.
                         */
                        mutex_unlock(&registration->mutex);
                        XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
                                               &irq_flags);
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return xpUnequalMsgSizes;
                }
        } else {
                ch->entry_size = registration->entry_size;

                XPC_SET_REASON(ch, 0, 0);
                ch->flags &= ~XPC_C_DISCONNECTED;

                atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
        }

        mutex_unlock(&registration->mutex);

        /* initiate the connection */

        ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
        xpc_send_chctl_openrequest(ch, &irq_flags);

        xpc_process_connect(ch, &irq_flags);

        spin_unlock_irqrestore(&ch->lock, irq_flags);

        return xpSuccess;
}
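
/*
 * Illustrative sketch (not part of this file): the registration consumed
 * above is normally created by a caller of xpc_connect() (see xp_main.c),
 * roughly as below. The channel number, limits, and helper names are
 * hypothetical placeholders:
 *
 *      static void my_ch_func(enum xp_retval reason, short partid,
 *                             int ch_number, void *data, void *key);
 *
 *      enum xp_retval ret;
 *
 *      ret = xpc_connect(MY_CH_NUMBER, my_ch_func, NULL, MY_PAYLOAD_SIZE,
 *                        MY_NENTRIES, MY_ASSIGNED_LIMIT, MY_IDLE_LIMIT);
 *      if (ret != xpSuccess)
 *              return ret;
 */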

void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
        unsigned long irq_flags;
        union xpc_channel_ctl_flags chctl;
        struct xpc_channel *ch;
        int ch_number;
        u32 ch_flags;

        chctl.all_flags = xpc_get_chctl_all_flags(part);

        /*
         * Initiate channel connections for registered channels.
         *
         * For each connected channel that has pending messages activate idle
         * kthreads and/or create new kthreads as needed.
         */

        for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
                ch = &part->channels[ch_number];

                /*
                 * Process any open or close related chctl flags, and then
                 * deal with connecting or disconnecting the channel as
                 * required.
                 */

                if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
                        xpc_process_openclose_chctl_flags(part, ch_number,
                                                        chctl.flags[ch_number]);
                }

                ch_flags = ch->flags;   /* need an atomic snapshot of flags */

                if (ch_flags & XPC_C_DISCONNECTING) {
                        spin_lock_irqsave(&ch->lock, irq_flags);
                        xpc_process_disconnect(ch, &irq_flags);
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        continue;
                }

                if (part->act_state == XPC_P_AS_DEACTIVATING)
                        continue;

                if (!(ch_flags & XPC_C_CONNECTED)) {
                        if (!(ch_flags & XPC_C_OPENREQUEST)) {
                                DBUG_ON(ch_flags & XPC_C_SETUP);
                                (void)xpc_connect_channel(ch);
                        } else {
                                spin_lock_irqsave(&ch->lock, irq_flags);
                                xpc_process_connect(ch, &irq_flags);
                                spin_unlock_irqrestore(&ch->lock, irq_flags);
                        }
                        continue;
                }

                /*
                 * Process any message related chctl flags, this may involve
                 * the activation of kthreads to deliver any pending messages
                 * sent from the other partition.
                 */

                if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
                        xpc_process_msg_chctl_flags(part, ch_number);
        }
}

/*
 * XPC's heartbeat code calls this function to inform XPC that a partition is
 * going down. XPC responds by tearing down the XPartition Communication
 * infrastructure used for the just downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
 * at the same time. Nor will it ever make multiple calls to either function
 * at the same time.
 */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
        unsigned long irq_flags;
        int ch_number;
        struct xpc_channel *ch;

        dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
                XPC_PARTID(part), reason);

        if (!xpc_part_ref(part)) {
                /* infrastructure for this partition isn't currently set up */
                return;
        }

        /* disconnect channels associated with the partition going down */

        for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
                ch = &part->channels[ch_number];

                xpc_msgqueue_ref(ch);
                spin_lock_irqsave(&ch->lock, irq_flags);

                XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

                spin_unlock_irqrestore(&ch->lock, irq_flags);
                xpc_msgqueue_deref(ch);
        }

        xpc_wakeup_channel_mgr(part);

        xpc_part_deref(part);
}

/*
 * Called by XP at the time of channel connection registration to cause
 * XPC to establish connections to all currently active partitions.
 */
void
xpc_initiate_connect(int ch_number)
{
        short partid;
        struct xpc_partition *part;
        struct xpc_channel *ch;

        DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

        for (partid = 0; partid < xp_max_npartitions; partid++) {
                part = &xpc_partitions[partid];

                if (xpc_part_ref(part)) {
                        ch = &part->channels[ch_number];

                        /*
                         * Initiate the establishment of a connection on the
                         * newly registered channel to the remote partition.
                         */
                        xpc_wakeup_channel_mgr(part);
                        xpc_part_deref(part);
                }
        }
}

void
xpc_connected_callout(struct xpc_channel *ch)
{
        /* let the registerer know that a connection has been established */

        if (ch->func != NULL) {
                dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
                        "partid=%d, channel=%d\n", ch->partid, ch->number);

                ch->func(xpConnected, ch->partid, ch->number,
                         (void *)(u64)ch->local_nentries, ch->key);

                dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
                        "partid=%d, channel=%d\n", ch->partid, ch->number);
        }
}

/*
 * Called by XP at the time of channel connection unregistration to cause
 * XPC to tear down all current connections for the specified channel.
 *
 * Before returning xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed/torn down. So the caller can be
 * assured that they will not be receiving any more callouts from XPC to the
 * function they registered via xpc_connect().
 *
 * Arguments:
 *
 *      ch_number - channel # to unregister.
 */
void
xpc_initiate_disconnect(int ch_number)
{
        unsigned long irq_flags;
        short partid;
        struct xpc_partition *part;
        struct xpc_channel *ch;

        DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

        /* initiate the channel disconnect for every active partition */
        for (partid = 0; partid < xp_max_npartitions; partid++) {
                part = &xpc_partitions[partid];

                if (xpc_part_ref(part)) {
                        ch = &part->channels[ch_number];
                        xpc_msgqueue_ref(ch);

                        spin_lock_irqsave(&ch->lock, irq_flags);

                        if (!(ch->flags & XPC_C_DISCONNECTED)) {
                                ch->flags |= XPC_C_WDISCONNECT;

                                XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
                                                       &irq_flags);
                        }

                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        xpc_msgqueue_deref(ch);
                        xpc_part_deref(part);
                }
        }

        xpc_disconnect_wait(ch_number);
}
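
/*
 * Illustrative sketch (hypothetical, not part of this driver): a channel
 * function as seen from the callout side. xpc_connected_callout() above
 * passes the channel's local_nentries (cast through u64) as the data
 * argument, xpc_deliver_payload() further below passes the received payload,
 * and disconnect callouts pass NULL:
 *
 *      static void
 *      my_ch_func(enum xp_retval reason, short partid, int ch_number,
 *                 void *data, void *key)
 *      {
 *              switch (reason) {
 *              case xpConnected:
 *                      my_nentries = (u64)data;
 *                      break;
 *              case xpMsgReceived:
 *                      my_consume_payload(data);
 *                      xpc_initiate_received(partid, ch_number, data);
 *                      break;
 *              default:
 *                      my_note_disconnect(reason);
 *                      break;
 *              }
 *      }
 */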

/*
 * To disconnect a channel, and reflect it back to all who may be waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
                       enum xp_retval reason, unsigned long *irq_flags)
{
        u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

        DBUG_ON(!spin_is_locked(&ch->lock));

        if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
                return;

        DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

        dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
                reason, line, ch->partid, ch->number);

        XPC_SET_REASON(ch, reason, line);

        ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
        /* some of these may not have been set */
        ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
                       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
                       XPC_C_CONNECTING | XPC_C_CONNECTED);

        xpc_send_chctl_closerequest(ch, irq_flags);

        if (channel_was_connected)
                ch->flags |= XPC_C_WASCONNECTED;

        spin_unlock_irqrestore(&ch->lock, *irq_flags);

        /* wake all idle kthreads so they can exit */
        if (atomic_read(&ch->kthreads_idle) > 0) {
                wake_up_all(&ch->idle_wq);

        } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
                   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
                /* start a kthread that will do the xpDisconnecting callout */
                xpc_create_kthreads(ch, 1, 1);
        }

        /* wake those waiting to allocate an entry from the local msg queue */
        if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
                wake_up(&ch->msg_allocate_wq);

        spin_lock_irqsave(&ch->lock, *irq_flags);
}

void
xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
        /*
         * Let the channel's registerer know that the channel is being
         * disconnected. We don't want to do this if the registerer was never
         * informed of a connection being made.
         */

        if (ch->func != NULL) {
                dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
                        "channel=%d\n", reason, ch->partid, ch->number);

                ch->func(reason, ch->partid, ch->number, NULL, ch->key);

                dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
                        "channel=%d\n", reason, ch->partid, ch->number);
        }
}

/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
        enum xp_retval ret;

        if (ch->flags & XPC_C_DISCONNECTING) {
                DBUG_ON(ch->reason == xpInterrupted);
                return ch->reason;
        }

        atomic_inc(&ch->n_on_msg_allocate_wq);
        ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
        atomic_dec(&ch->n_on_msg_allocate_wq);

        if (ch->flags & XPC_C_DISCONNECTING) {
                ret = ch->reason;
                DBUG_ON(ch->reason == xpInterrupted);
        } else if (ret == 0) {
                ret = xpTimeout;
        } else {
                ret = xpInterrupted;
        }

        return ret;
}
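
/*
 * Note on the return values above: xpTimeout and xpInterrupted are
 * transient, and callers (the arch-specific xpc_send_payload()
 * implementations) are expected to retry the allocation, so the one-jiffy
 * bound applies per wait rather than to the send as a whole. Also,
 * interruptible_sleep_on_timeout() is a historical, race-prone interface; a
 * modern rewrite would presumably use wait_event_interruptible_timeout()
 * with an explicit condition instead.
 */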

/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * Once sent, this routine will not wait for the message to be received, nor
 * will notification be given when it does happen.
 *
 * Arguments:
 *
 *      partid - ID of partition to which the channel is connected.
 *      ch_number - channel # to send message on.
 *      flags - see xp.h for valid flags.
 *      payload - pointer to the payload which is to be sent.
 *      payload_size - size of the payload in bytes.
 */
enum xp_retval
xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
                  u16 payload_size)
{
        struct xpc_partition *part = &xpc_partitions[partid];
        enum xp_retval ret = xpUnknownReason;

        dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
                partid, ch_number);

        DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
        DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
        DBUG_ON(payload == NULL);

        if (xpc_part_ref(part)) {
                ret = xpc_send_payload(&part->channels[ch_number], flags,
                                       payload, payload_size, 0, NULL, NULL);
                xpc_part_deref(part);
        }

        return ret;
}

/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * This routine will not wait for the message to be sent or received.
 *
 * Once the remote end of the channel has received the message, the function
 * passed as an argument to xpc_initiate_send_notify() will be called. This
 * allows the sender to free up or re-use any buffers referenced by the
 * message, but does NOT mean the message has been processed at the remote
 * end by a receiver.
 *
 * If this routine returns an error, the caller's function will NOT be called.
 *
 * Arguments:
 *
 *      partid - ID of partition to which the channel is connected.
 *      ch_number - channel # to send message on.
 *      flags - see xp.h for valid flags.
 *      payload - pointer to the payload which is to be sent.
 *      payload_size - size of the payload in bytes.
 *      func - function to call with asynchronous notification of message
 *             receipt. THIS FUNCTION MUST BE NON-BLOCKING.
 *      key - user-defined key to be passed to the function when it's called.
 */
enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
                         u16 payload_size, xpc_notify_func func, void *key)
{
        struct xpc_partition *part = &xpc_partitions[partid];
        enum xp_retval ret = xpUnknownReason;

        dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
                partid, ch_number);

        DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
        DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
        DBUG_ON(payload == NULL);
        DBUG_ON(func == NULL);

        if (xpc_part_ref(part)) {
                ret = xpc_send_payload(&part->channels[ch_number], flags,
                                       payload, payload_size, XPC_N_CALL, func,
                                       key);
                xpc_part_deref(part);
        }
        return ret;
}
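
/*
 * Illustrative sketch (hypothetical): a non-blocking notify function and a
 * matching send. xpMsgDelivered tells the sender the remote side has
 * received the message, so any buffer tracked via the key may be reclaimed:
 *
 *      static void my_notify(enum xp_retval reason, short partid,
 *                            int ch_number, void *key)
 *      {
 *              if (reason == xpMsgDelivered)
 *                      my_reclaim_buffer(key);
 *      }
 *
 *      ret = xpc_initiate_send_notify(partid, MY_CH_NUMBER, XPC_NOWAIT,
 *                                     payload, payload_size, my_notify,
 *                                     my_buffer);
 */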

/*
 * Deliver a message's payload to its intended recipient.
 */
void
xpc_deliver_payload(struct xpc_channel *ch)
{
        void *payload;

        payload = xpc_get_deliverable_payload(ch);
        if (payload != NULL) {

                /*
                 * This ref is taken to protect the payload itself from being
                 * freed before the user is finished with it, which the user
                 * indicates by calling xpc_initiate_received().
                 */
                xpc_msgqueue_ref(ch);

                atomic_inc(&ch->kthreads_active);

                if (ch->func != NULL) {
                        dev_dbg(xpc_chan, "ch->func() called, payload=0x%p "
                                "partid=%d channel=%d\n", payload, ch->partid,
                                ch->number);

                        /* deliver the message to its intended recipient */
                        ch->func(xpMsgReceived, ch->partid, ch->number,
                                 payload, ch->key);

                        dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p "
                                "partid=%d channel=%d\n", payload, ch->partid,
                                ch->number);
                }

                atomic_dec(&ch->kthreads_active);
        }
}

/*
 * Acknowledge receipt of a delivered message's payload.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
 *
 * Arguments:
 *
 *      partid - ID of partition to which the channel is connected.
 *      ch_number - channel # message received on.
 *      payload - pointer to the payload area allocated via
 *                xpc_initiate_send() or xpc_initiate_send_notify().
 */
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
        struct xpc_partition *part = &xpc_partitions[partid];
        struct xpc_channel *ch;

        DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
        DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

        ch = &part->channels[ch_number];
        xpc_received_payload(ch, payload);

        /* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
        xpc_msgqueue_deref(ch);
}
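
/*
 * Illustrative sketch (hypothetical): because xpc_deliver_payload() holds a
 * message-queue reference until the payload is acknowledged, a channel
 * function may hand the payload off and acknowledge it later from another
 * context, e.g. a workqueue:
 *
 *      struct my_work {
 *              struct work_struct work;
 *              short partid;
 *              int ch_number;
 *              void *payload;
 *      };
 *
 *      static void my_worker(struct work_struct *work)
 *      {
 *              struct my_work *w = container_of(work, struct my_work, work);
 *
 *              my_consume_payload(w->payload);
 *              xpc_initiate_received(w->partid, w->ch_number, w->payload);
 *              kfree(w);
 *      }
 */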