/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

/*
 * One entry of the channel-message dispatch table: maps a host message
 * type to the handler invoked from vmbus_onmessage().
 */
struct vmbus_channel_message_table_entry {
	enum vmbus_channel_message_type message_type;
	void (*message_handler)(struct vmbus_channel_message_header *msg);
};

/*
 * Deferred-work wrapper used to finish processing a rescind offer
 * outside of the global message-handling work context.
 */
struct vmbus_rescind_work {
	struct work_struct work;
	struct vmbus_channel *channel;
};

/**
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @icmsg_negotiate: Pointer to negotiate message structure
 * @buf: Raw buffer channel data
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * @negop is of type &struct icmsg_negotiate.
 * Set up and fill in default negotiate response message.
 *
 * The fw_version specifies the framework version that
 * we can support and srv_version specifies the service
 * version we can support.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
				struct icmsg_negotiate *negop, u8 *buf,
				int fw_version, int srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i;
	bool found_match = false;

	icmsghdrp->icmsgsize = 0x10;

	/* Split the caller-supplied versions into major.minor pairs. */
	fw_major = (fw_version >> 16);
	fw_minor = (fw_version & 0xFFFF);

	srv_major = (srv_version >> 16);
	srv_minor = (srv_version & 0xFFFF);

	/*
	 * The negotiate payload follows the pipe and icmsg headers in
	 * @buf; note the incoming @negop argument is overwritten here.
	 */
	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */

	for (i = 0; i < negop->icframe_vercnt; i++) {
		if ((negop->icversion_data[i].major == fw_major) &&
		    (negop->icversion_data[i].minor == fw_minor)) {
			icframe_major = negop->icversion_data[i].major;
			icframe_minor = negop->icversion_data[i].minor;
			found_match = true;
		}
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	/*
	 * The service versions are listed immediately after the framework
	 * versions in icversion_data[].
	 */
	for (i = negop->icframe_vercnt;
	     (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
		if ((negop->icversion_data[i].major == srv_major) &&
		    (negop->icversion_data[i].minor == srv_minor)) {
			icmsg_major = negop->icversion_data[i].major;
			icmsg_minor = negop->icversion_data[i].minor;
			found_match = true;
		}
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */

fw_error:
	if (!found_match) {
		/* No common version: report zero counts back to the host. */
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;
	return found_match;
}

EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);

/*
 * Work-context wrapper that invokes the primary channel's sub-channel
 * creation callback for a newly offered sub-channel.
 */
static void vmbus_sc_creation_cb(struct work_struct *work)
{
	struct vmbus_channel *newchannel = container_of(work,
							struct vmbus_channel,
							work);
	struct vmbus_channel *primary_channel = newchannel->primary_channel;

	/*
	 * On entry sc_creation_callback has been already verified to
	 * be non-NULL.
	 */
	primary_channel->sc_creation_callback(newchannel);
}

/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	static atomic_t chan_num = ATOMIC_INIT(0);
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	/*
	 * Each channel gets a unique id and its own ordered control
	 * workqueue (max_active == 1, WQ_MEM_RECLAIM).
	 */
	channel->id = atomic_inc_return(&chan_num);
	spin_lock_init(&channel->inbound_lock);
	spin_lock_init(&channel->lock);

	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

	channel->controlwq = alloc_workqueue("hv_vmbus_ctl/%d", WQ_MEM_RECLAIM,
					     1, channel->id);
	if (!channel->controlwq) {
		kfree(channel);
		return NULL;
	}

	return channel;
}

/*
 * release_channel - Release the vmbus channel object itself
 */
static void release_channel(struct work_struct *work)
{
	struct vmbus_channel *channel = container_of(work,
						     struct vmbus_channel,
						     work);

	destroy_workqueue(channel->controlwq);

	kfree(channel);
}

/*
 * free_channel - Release the resources used
 by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{

	/*
	 * We have to release the channel's workqueue/thread in the vmbus's
	 * workqueue/thread context
	 * ie we can't destroy ourselves.
	 */
	INIT_WORK(&channel->work, release_channel);
	queue_work(vmbus_connection.work_queue, &channel->work);
}

/*
 * Work handler that completes the rescind of an offer: unregisters the
 * associated device (if one was created) or releases the relid directly.
 */
static void process_rescind_fn(struct work_struct *work)
{
	struct vmbus_rescind_work *rc_work;
	struct vmbus_channel *channel;
	struct device *dev;

	rc_work = container_of(work, struct vmbus_rescind_work, work);
	channel = rc_work->channel;

	/*
	 * We have already acquired a reference on the channel
	 * and so it cannot vanish underneath us.
	 * It is possible (while very unlikely) that we may
	 * get here while the processing of the initial offer
	 * is still not complete. Deal with this situation by
	 * just waiting until the channel is in the correct state.
	 */

	/* Poll until the offer path has parked channel->work on
	 * release_channel (see vmbus_do_device_register). */
	while (channel->work.func != release_channel)
		msleep(1000);

	if (channel->device_obj) {
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	} else {
		hv_process_channel_removal(channel,
					   channel->offermsg.child_relid);
	}
	kfree(work);
}

/* Add the channel to this CPU's per-cpu list; runs on the target CPU. */
static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	int cpu = smp_processor_id();

	list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
}

/* Remove the channel from whatever per-cpu list it was queued on. */
static void percpu_channel_deq(void *arg)
{
	struct vmbus_channel *channel = arg;

	list_del(&channel->percpu_list);
}


void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
	struct vmbus_channel_relid_released msg;
	unsigned long flags;
	struct vmbus_channel *primary_channel;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
271 msg.child_relid = relid; 272 msg.header.msgtype = CHANNELMSG_RELID_RELEASED; 273 vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released)); 274 275 if (channel == NULL) 276 return; 277 278 if (channel->target_cpu != get_cpu()) { 279 put_cpu(); 280 smp_call_function_single(channel->target_cpu, 281 percpu_channel_deq, channel, true); 282 } else { 283 percpu_channel_deq(channel); 284 put_cpu(); 285 } 286 287 if (channel->primary_channel == NULL) { 288 spin_lock_irqsave(&vmbus_connection.channel_lock, flags); 289 list_del(&channel->listentry); 290 spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); 291 } else { 292 primary_channel = channel->primary_channel; 293 spin_lock_irqsave(&primary_channel->lock, flags); 294 list_del(&channel->sc_list); 295 spin_unlock_irqrestore(&primary_channel->lock, flags); 296 } 297 free_channel(channel); 298 } 299 300 void vmbus_free_channels(void) 301 { 302 struct vmbus_channel *channel; 303 304 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { 305 vmbus_device_unregister(channel->device_obj); 306 free_channel(channel); 307 } 308 } 309 310 static void vmbus_do_device_register(struct work_struct *work) 311 { 312 struct hv_device *device_obj; 313 int ret; 314 unsigned long flags; 315 struct vmbus_channel *newchannel = container_of(work, 316 struct vmbus_channel, 317 work); 318 319 ret = vmbus_device_register(newchannel->device_obj); 320 if (ret != 0) { 321 pr_err("unable to add child device object (relid %d)\n", 322 newchannel->offermsg.child_relid); 323 spin_lock_irqsave(&vmbus_connection.channel_lock, flags); 324 list_del(&newchannel->listentry); 325 device_obj = newchannel->device_obj; 326 newchannel->device_obj = NULL; 327 spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); 328 329 if (newchannel->target_cpu != get_cpu()) { 330 put_cpu(); 331 smp_call_function_single(newchannel->target_cpu, 332 percpu_channel_deq, newchannel, true); 333 } else { 334 percpu_channel_deq(newchannel); 
			put_cpu();
		}

		kfree(device_obj);
		if (!newchannel->rescind) {
			/* No rescind pending: safe to free the channel now. */
			free_channel(newchannel);
			return;
		}
	}
	/*
	 * The next state for this channel is to be freed.
	 */
	INIT_WORK(&newchannel->work, release_channel);
}

/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	bool fnew = true;
	bool enq = false;
	unsigned long flags;

	/* Make sure this is a new offer */
	spin_lock_irqsave(&vmbus_connection.channel_lock, flags);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		/* Same device type AND same instance => duplicate offer. */
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
			newchannel->offermsg.offer.if_type) &&
			!uuid_le_cmp(channel->offermsg.offer.if_instance,
				newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew) {
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);
		enq = true;
	}

	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

	if (enq) {
		/* Bind the new channel to its target CPU's per-cpu list. */
		if (newchannel->target_cpu != get_cpu()) {
			put_cpu();
			smp_call_function_single(newchannel->target_cpu,
						 percpu_channel_enq,
						 newchannel, true);
		} else {
			percpu_channel_enq(newchannel);
			put_cpu();
		}
	}
	if (!fnew) {
		/*
		 * Check to see if this is a sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index != 0) {
			/*
			 * Process the sub-channel.
			 */
			newchannel->primary_channel = channel;
			spin_lock_irqsave(&channel->lock, flags);
			list_add_tail(&newchannel->sc_list, &channel->sc_list);
			spin_unlock_irqrestore(&channel->lock, flags);

			if (newchannel->target_cpu != get_cpu()) {
				put_cpu();
				smp_call_function_single(newchannel->target_cpu,
							 percpu_channel_enq,
							 newchannel, true);
			} else {
				percpu_channel_enq(newchannel);
				put_cpu();
			}

			newchannel->state = CHANNEL_OPEN_STATE;
			channel->num_sc++;
			if (channel->sc_creation_callback != NULL) {
				/*
				 * We need to invoke the sub-channel creation
				 * callback; invoke this in a separate work
				 * context since we are currently running on
				 * the global work context in which we handle
				 * messages from the host.
				 */
				INIT_WORK(&newchannel->work,
					  vmbus_sc_creation_cb);
				queue_work(newchannel->controlwq,
					   &newchannel->work);
			}

			return;
		}

		/* Duplicate offer that is not a sub-channel: drop it. */
		goto err_free_chan;
	}

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	/*
	 * Start the process of binding this offer to the driver
	 * We need to set the DeviceObject field before calling
	 * vmbus_child_dev_add()
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 * Invoke this call on the per-channel work context.
	 * Until we return from this function, rescind offer message
	 * cannot be processed as we are running on the global message
	 * handling work.
465 */ 466 INIT_WORK(&newchannel->work, vmbus_do_device_register); 467 queue_work(newchannel->controlwq, &newchannel->work); 468 return; 469 470 err_deq_chan: 471 spin_lock_irqsave(&vmbus_connection.channel_lock, flags); 472 list_del(&newchannel->listentry); 473 spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); 474 475 if (newchannel->target_cpu != get_cpu()) { 476 put_cpu(); 477 smp_call_function_single(newchannel->target_cpu, 478 percpu_channel_deq, newchannel, true); 479 } else { 480 percpu_channel_deq(newchannel); 481 put_cpu(); 482 } 483 484 err_free_chan: 485 free_channel(newchannel); 486 } 487 488 enum { 489 IDE = 0, 490 SCSI, 491 NIC, 492 MAX_PERF_CHN, 493 }; 494 495 /* 496 * This is an array of device_ids (device types) that are performance critical. 497 * We attempt to distribute the interrupt load for these devices across 498 * all available CPUs. 499 */ 500 static const struct hv_vmbus_device_id hp_devs[] = { 501 /* IDE */ 502 { HV_IDE_GUID, }, 503 /* Storage - SCSI */ 504 { HV_SCSI_GUID, }, 505 /* Network */ 506 { HV_NIC_GUID, }, 507 /* NetworkDirect Guest RDMA */ 508 { HV_ND_GUID, }, 509 }; 510 511 512 /* 513 * We use this state to statically distribute the channel interrupt load. 514 */ 515 static u32 next_vp; 516 517 /* 518 * Starting with Win8, we can statically distribute the incoming 519 * channel interrupt load by binding a channel to VCPU. We 520 * implement here a simple round robin scheme for distributing 521 * the interrupt load. 522 * We will bind channels that are not performance critical to cpu 0 and 523 * performance critical channels (IDE, SCSI and Network) will be uniformly 524 * distributed across all available CPUs. 
 */
static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
{
	u32 cur_cpu;
	int i;
	bool perf_chn = false;
	u32 max_cpus = num_online_cpus();

	/* Is this one of the performance-critical device types? */
	for (i = IDE; i < MAX_PERF_CHN; i++) {
		if (!memcmp(type_guid->b, hp_devs[i].guid,
				 sizeof(uuid_le))) {
			perf_chn = true;
			break;
		}
	}
	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on cpu 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 */
		channel->target_cpu = 0;
		channel->target_vp = 0;
		return;
	}
	/* Simple round-robin across the online CPUs. */
	cur_cpu = (++next_vp % max_cpus);
	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_context.vp_index[cur_cpu];
}

/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 *
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * By default we setup state to enable batched
	 * reading. A specific service can choose to
	 * disable this prior to opening the channel.
	 */
	newchannel->batched_reading = true;

	/*
	 * Setup state for signalling the host.
	 */
	newchannel->sig_event = (struct hv_input_signal_event *)
				(ALIGN((unsigned long)
				       &newchannel->sig_buf,
				       HV_HYPERCALL_PARAM_ALIGN));

	newchannel->sig_event->connectionid.asu32 = 0;
	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
	newchannel->sig_event->flag_number = 0;
	newchannel->sig_event->rsvdz = 0;

	if (vmbus_proto_version != VERSION_WS2008) {
		/*
		 * Win8+ hosts may supply a dedicated interrupt and a
		 * per-channel connection id for signalling.
		 */
		newchannel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event->connectionid.u.id =
				offer->connection_id;
	}

	init_vp_index(newchannel, &offer->offer.if_type);

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	vmbus_process_offer(newchannel);
}

/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	struct vmbus_rescind_work *rc_work;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;
	channel = relid2channel(rescind->child_relid, true);

	if (channel == NULL) {
		/* Nothing to tear down; just release the relid. */
		hv_process_channel_removal(NULL, rescind->child_relid);
		return;
	}

	/*
	 * We have acquired a reference on the channel and have posted
	 * the rescind state. Perform further cleanup in a work context
	 * that is different from the global work context in which
	 * we process messages from the host (we are currently executing
	 * on that global context).
637 */ 638 rc_work = kzalloc(sizeof(struct vmbus_rescind_work), GFP_KERNEL); 639 if (!rc_work) { 640 pr_err("Unable to allocate memory for rescind processing "); 641 return; 642 } 643 rc_work->channel = channel; 644 INIT_WORK(&rc_work->work, process_rescind_fn); 645 schedule_work(&rc_work->work); 646 } 647 648 /* 649 * vmbus_onoffers_delivered - 650 * This is invoked when all offers have been delivered. 651 * 652 * Nothing to do here. 653 */ 654 static void vmbus_onoffers_delivered( 655 struct vmbus_channel_message_header *hdr) 656 { 657 } 658 659 /* 660 * vmbus_onopen_result - Open result handler. 661 * 662 * This is invoked when we received a response to our channel open request. 663 * Find the matching request, copy the response and signal the requesting 664 * thread. 665 */ 666 static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr) 667 { 668 struct vmbus_channel_open_result *result; 669 struct vmbus_channel_msginfo *msginfo; 670 struct vmbus_channel_message_header *requestheader; 671 struct vmbus_channel_open_channel *openmsg; 672 unsigned long flags; 673 674 result = (struct vmbus_channel_open_result *)hdr; 675 676 /* 677 * Find the open msg, copy the result and signal/unblock the wait event 678 */ 679 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 680 681 list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, 682 msglistentry) { 683 requestheader = 684 (struct vmbus_channel_message_header *)msginfo->msg; 685 686 if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) { 687 openmsg = 688 (struct vmbus_channel_open_channel *)msginfo->msg; 689 if (openmsg->child_relid == result->child_relid && 690 openmsg->openid == result->openid) { 691 memcpy(&msginfo->response.open_result, 692 result, 693 sizeof( 694 struct vmbus_channel_open_result)); 695 complete(&msginfo->waitevent); 696 break; 697 } 698 } 699 } 700 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 701 } 702 703 /* 704 * vmbus_ongpadl_created - GPADL 
 created handler.
 *
 * This is invoked when we received a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			/* Match on both the relid and the gpadl handle. */
			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we received a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			/* Match on the gpadl handle being torn down. */
			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_onversion_response - Version response handler
 *
 * This is invoked when we received a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		/*
		 * Note: no break here — every pending INITIATE_CONTACT
		 * request on the list is completed with this response.
		 */
		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			      version_response,
			      sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/* Channel message dispatch table */
static struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT] = {
	{CHANNELMSG_INVALID,			NULL},
	{CHANNELMSG_OFFERCHANNEL,		vmbus_onoffer},
	{CHANNELMSG_RESCIND_CHANNELOFFER,	vmbus_onoffer_rescind},
	{CHANNELMSG_REQUESTOFFERS,		NULL},
	{CHANNELMSG_ALLOFFERS_DELIVERED,	vmbus_onoffers_delivered},
	{CHANNELMSG_OPENCHANNEL,		NULL},
	{CHANNELMSG_OPENCHANNEL_RESULT,		vmbus_onopen_result},
	{CHANNELMSG_CLOSECHANNEL,		NULL},
	{CHANNELMSG_GPADL_HEADER,		NULL},
	{CHANNELMSG_GPADL_BODY,			NULL},
	{CHANNELMSG_GPADL_CREATED,		vmbus_ongpadl_created},
	{CHANNELMSG_GPADL_TEARDOWN,		NULL},
	{CHANNELMSG_GPADL_TORNDOWN,		vmbus_ongpadl_torndown},
	{CHANNELMSG_RELID_RELEASED,		NULL},
	{CHANNELMSG_INITIATE_CONTACT,		NULL},
	{CHANNELMSG_VERSION_RESPONSE,		vmbus_onversion_response},
	{CHANNELMSG_UNLOAD,			NULL},
};

/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
855 */ 856 void vmbus_onmessage(void *context) 857 { 858 struct hv_message *msg = context; 859 struct vmbus_channel_message_header *hdr; 860 int size; 861 862 hdr = (struct vmbus_channel_message_header *)msg->u.payload; 863 size = msg->header.payload_size; 864 865 if (hdr->msgtype >= CHANNELMSG_COUNT) { 866 pr_err("Received invalid channel message type %d size %d\n", 867 hdr->msgtype, size); 868 print_hex_dump_bytes("", DUMP_PREFIX_NONE, 869 (unsigned char *)msg->u.payload, size); 870 return; 871 } 872 873 if (channel_message_table[hdr->msgtype].message_handler) 874 channel_message_table[hdr->msgtype].message_handler(hdr); 875 else 876 pr_err("Unhandled channel message type %d\n", hdr->msgtype); 877 } 878 879 /* 880 * vmbus_request_offers - Send a request to get all our pending offers. 881 */ 882 int vmbus_request_offers(void) 883 { 884 struct vmbus_channel_message_header *msg; 885 struct vmbus_channel_msginfo *msginfo; 886 int ret; 887 888 msginfo = kmalloc(sizeof(*msginfo) + 889 sizeof(struct vmbus_channel_message_header), 890 GFP_KERNEL); 891 if (!msginfo) 892 return -ENOMEM; 893 894 msg = (struct vmbus_channel_message_header *)msginfo->msg; 895 896 msg->msgtype = CHANNELMSG_REQUESTOFFERS; 897 898 899 ret = vmbus_post_msg(msg, 900 sizeof(struct vmbus_channel_message_header)); 901 if (ret != 0) { 902 pr_err("Unable to request offers - %d\n", ret); 903 904 goto cleanup; 905 } 906 907 cleanup: 908 kfree(msginfo); 909 910 return ret; 911 } 912 913 /* 914 * Retrieve the (sub) channel on which to send an outgoing request. 915 * When a primary channel has multiple sub-channels, we try to 916 * distribute the load equally amongst all available channels. 
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
	struct list_head *cur, *tmp;
	int cur_cpu;
	struct vmbus_channel *cur_channel;
	struct vmbus_channel *outgoing_channel = primary;
	int next_channel;
	int i = 1;

	if (list_empty(&primary->sc_list))
		return outgoing_channel;

	/*
	 * Round-robin cursor over the sub-channels; once it runs past
	 * num_sc it wraps back and the primary channel is used.
	 * NOTE(review): the ">" comparison looks like it allows one extra
	 * slot before wrapping — confirm intended off-by-one behavior.
	 */
	next_channel = primary->next_oc++;

	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

	/* Prefer a sub-channel whose target VP matches the current CPU. */
	cur_cpu = hv_context.vp_index[get_cpu()];
	put_cpu();
	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

		if (i == next_channel)
			return cur_channel;

		i++;
	}

	return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);

/*
 * Invoke the primary channel's sub-channel creation callback on every
 * sub-channel that already exists.
 */
static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}

/*
 * Register the callback invoked for each newly created sub-channel of
 * @primary_channel.
 */
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);

bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);