/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

static void init_vp_index(struct vmbus_channel *channel,
			  const uuid_le *type_guid);

/**
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @negop: Pointer to negotiate message structure
 * @buf: Raw buffer channel data
 * @fw_version: The framework version we can support
 * @srv_version: The service version we can support
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * @negop is of type &struct icmsg_negotiate.
 * Set up and fill in the default negotiate response message.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
			       struct icmsg_negotiate *negop, u8 *buf,
			       int fw_version, int srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i;
	bool found_match = false;

	icmsghdrp->icmsgsize = 0x10;
	fw_major = (fw_version >> 16);
	fw_minor = (fw_version & 0xFFFF);

	srv_major = (srv_version >> 16);
	srv_minor = (srv_version & 0xFFFF);

	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */

	for (i = 0; i < negop->icframe_vercnt; i++) {
		if ((negop->icversion_data[i].major == fw_major) &&
		    (negop->icversion_data[i].minor == fw_minor)) {
			icframe_major = negop->icversion_data[i].major;
			icframe_minor = negop->icversion_data[i].minor;
			found_match = true;
		}
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = negop->icframe_vercnt;
	     (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
		if ((negop->icversion_data[i].major == srv_major) &&
		    (negop->icversion_data[i].minor == srv_minor)) {
			icmsg_major = negop->icversion_data[i].major;
			icmsg_minor = negop->icversion_data[i].minor;
			found_match = true;
		}
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */
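
	/*
	 * A vercnt of 0 tells the host that negotiation failed; on success
	 * we report exactly one framework version and one service version,
	 * in icversion_data[0] and icversion_data[1] respectively.
	 */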

fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;
	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);

/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	static atomic_t chan_num = ATOMIC_INIT(0);
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	channel->id = atomic_inc_return(&chan_num);
	spin_lock_init(&channel->inbound_lock);
	spin_lock_init(&channel->lock);

	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

	return channel;
}

/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	kfree(channel);
}

static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	int cpu = smp_processor_id();

	list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
}

static void percpu_channel_deq(void *arg)
{
	struct vmbus_channel *channel = arg;

	list_del(&channel->percpu_list);
}

void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
	struct vmbus_channel_relid_released msg;
	unsigned long flags;
	struct vmbus_channel *primary_channel;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));

	if (channel == NULL)
		return;

	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu,
					 percpu_channel_deq, channel, true);
	} else {
		percpu_channel_deq(channel);
		put_cpu();
	}

	if (channel->primary_channel == NULL) {
		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
		list_del(&channel->listentry);
		spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

		primary_channel = channel;
	} else {
		primary_channel = channel->primary_channel;
		spin_lock_irqsave(&primary_channel->lock, flags);
		list_del(&channel->sc_list);
		primary_channel->num_sc--;
		spin_unlock_irqrestore(&primary_channel->lock, flags);
	}

	/*
	 * We need to free the bit for init_vp_index() to work in the case
	 * of sub-channels, when we reload drivers like hv_netvsc.
	 */
	cpumask_clear_cpu(channel->target_cpu,
			  &primary_channel->alloced_cpus_in_node);

	free_channel(channel);
}
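
/*
 * vmbus_free_channels - Unregister every channel device currently known
 * to the connection, e.g. when the vmbus driver itself is torn down.
 */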
void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
				 listentry) {
		/*
		 * If we don't set rescind to true, vmbus_close_internal()
		 * won't invoke hv_process_channel_removal().
		 */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
}

/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	bool fnew = true;
	unsigned long flags;

	/* Make sure this is a new offer */
	spin_lock_irqsave(&vmbus_connection.channel_lock, flags);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
				 newchannel->offermsg.offer.if_type) &&
		    !uuid_le_cmp(channel->offermsg.offer.if_instance,
				 newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew)
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);

	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

	if (!fnew) {
		/*
		 * Check to see if this is a sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index != 0) {
			/*
			 * Process the sub-channel.
			 */
			newchannel->primary_channel = channel;
			spin_lock_irqsave(&channel->lock, flags);
			list_add_tail(&newchannel->sc_list, &channel->sc_list);
			channel->num_sc++;
			spin_unlock_irqrestore(&channel->lock, flags);
		} else
			goto err_free_chan;
	}

	init_vp_index(newchannel, &newchannel->offermsg.offer.if_type);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_enq,
					 newchannel, true);
	} else {
		percpu_channel_enq(newchannel);
		put_cpu();
	}

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can clean up properly.
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (!fnew) {
		if (channel->sc_creation_callback != NULL)
			channel->sc_creation_callback(newchannel);
		return;
	}

	/*
	 * Start the process of binding this offer to the driver.
	 * We need to set the device_obj field before calling
	 * vmbus_device_register().
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	if (vmbus_device_register(newchannel->device_obj) != 0) {
		pr_err("unable to add child device object (relid %d)\n",
		       newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}
	return;

err_deq_chan:
	spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
	list_del(&newchannel->listentry);
	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_deq, newchannel, true);
	} else {
		percpu_channel_deq(newchannel);
		put_cpu();
	}

err_free_chan:
	free_channel(newchannel);
}
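
/*
 * Classes of performance-critical devices; these double as indices into
 * the hp_devs[] table below, so the two must stay in the same order.
 */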
enum {
	IDE = 0,
	SCSI,
	NIC,
	ND_NIC,
	MAX_PERF_CHN,
};

/*
 * This is an array of device_ids (device types) that are performance
 * critical. We attempt to distribute the interrupt load for these
 * devices across all available CPUs.
 */
static const struct hv_vmbus_device_id hp_devs[] = {
	/* IDE */
	{ HV_IDE_GUID, },
	/* Storage - SCSI */
	{ HV_SCSI_GUID, },
	/* Network */
	{ HV_NIC_GUID, },
	/* NetworkDirect Guest RDMA */
	{ HV_ND_GUID, },
};

/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to a VCPU.
 * We do this in a hierarchical fashion:
 * first distribute the primary channels across available NUMA nodes
 * and then distribute the subchannels amongst the CPUs in the NUMA
 * node assigned to the primary channel.
 *
 * For pre-win8 hosts or non-performance-critical channels we assign the
 * first CPU in the first NUMA node.
 */
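/*
 * Illustrative example: on a guest with two NUMA nodes of four CPUs
 * each, successive primary channels land on nodes 0, 1, 0, 1, ... and
 * the sub-channels of a primary on node 1 cycle through CPUs 4-7
 * before any CPU in that node is reused.
 */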
static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
{
	u32 cur_cpu;
	int i;
	bool perf_chn = false;
	struct vmbus_channel *primary = channel->primary_channel;
	int next_node;
	struct cpumask available_mask;
	struct cpumask *alloced_mask;

	for (i = IDE; i < MAX_PERF_CHN; i++) {
		if (!memcmp(type_guid->b, hp_devs[i].guid,
			    sizeof(uuid_le))) {
			perf_chn = true;
			break;
		}
	}
	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on CPU 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to CPU 0.
		 */
		channel->numa_node = 0;
		channel->target_cpu = 0;
		channel->target_vp = hv_context.vp_index[0];
		return;
	}

	/*
	 * We distribute primary channels evenly across all the available
	 * NUMA nodes and within the assigned NUMA node we will assign the
	 * first available CPU to the primary channel.
	 * The sub-channels will be assigned to the CPUs available in the
	 * NUMA node evenly.
	 */
	if (!primary) {
		while (true) {
			next_node = next_numa_node_id++;
			if (next_node == nr_node_ids)
				next_node = next_numa_node_id = 0;
			if (cpumask_empty(cpumask_of_node(next_node)))
				continue;
			break;
		}
		channel->numa_node = next_node;
		primary = channel;
	}
	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(&available_mask, alloced_mask,
		    cpumask_of_node(primary->numa_node));

	cur_cpu = -1;
	while (true) {
		cur_cpu = cpumask_next(cur_cpu, &available_mask);
		if (cur_cpu >= nr_cpu_ids) {
			cur_cpu = -1;
			cpumask_copy(&available_mask,
				     cpumask_of_node(primary->numa_node));
			continue;
		}

		/*
		 * NOTE: in the case of sub-channels, we clear the
		 * sub-channel related bit(s) in primary->alloced_cpus_in_node
		 * in hv_process_channel_removal(), so when we reload drivers
		 * like hv_netvsc in an SMP guest, here we're able to
		 * re-allocate the bit from primary->alloced_cpus_in_node.
		 */
		if (!cpumask_test_cpu(cur_cpu,
				      &primary->alloced_cpus_in_node)) {
			cpumask_set_cpu(cur_cpu,
					&primary->alloced_cpus_in_node);
			cpumask_set_cpu(cur_cpu, alloced_mask);
			break;
		}
	}

	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_context.vp_index[cur_cpu];
}

/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wake up the waiting thread.
	 * Once we successfully unload, we can clean up the monitor state.
	 */
	complete(&vmbus_connection.unload_event);
}
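
/*
 * vmbus_initiate_unload - Send an UNLOAD request to the host and wait
 * for its response before proceeding.
 */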
void vmbus_initiate_unload(void)
{
	struct vmbus_channel_message_header hdr;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));

	wait_for_completion(&vmbus_connection.unload_event);
}

/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * By default we set up state to enable batched
	 * reading. A specific service can choose to
	 * disable this prior to opening the channel.
	 */
	newchannel->batched_reading = true;

	/*
	 * Set up state for signalling the host.
	 */
	newchannel->sig_event = (struct hv_input_signal_event *)
				(ALIGN((unsigned long)
				       &newchannel->sig_buf,
				       HV_HYPERCALL_PARAM_ALIGN));

	newchannel->sig_event->connectionid.asu32 = 0;
	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
	newchannel->sig_event->flag_number = 0;
	newchannel->sig_event->rsvdz = 0;

	if (vmbus_proto_version != VERSION_WS2008) {
		newchannel->is_dedicated_interrupt =
			(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event->connectionid.u.id =
			offer->connection_id;
	}

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
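	/*
	 * Each trigger group in the shared monitor page is 32 bits wide;
	 * split the monitor id into a group index and a bit position.
	 */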
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	vmbus_process_offer(newchannel);
}

/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously.
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	unsigned long flags;
	struct device *dev;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;
	channel = relid2channel(rescind->child_relid);

	if (channel == NULL) {
		hv_process_channel_removal(NULL, rescind->child_relid);
		return;
	}

	spin_lock_irqsave(&channel->lock, flags);
	channel->rescind = true;
	spin_unlock_irqrestore(&channel->lock, flags);

	if (channel->device_obj) {
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	} else {
		hv_process_channel_removal(channel,
					   channel->offermsg.child_relid);
	}
}

/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}

/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we receive a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we receive a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we receive a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_onversion_response - Version response handler.
 *
 * This is invoked when we receive a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			       version_response,
			       sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
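
/*
 * A NULL handler marks a message type we only ever send to the host
 * (or CHANNELMSG_INVALID); vmbus_onmessage() logs any such arrival
 * as unhandled.
 */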
/* Channel message dispatch table */
struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT] = {
	{CHANNELMSG_INVALID,			0, NULL},
	{CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer},
	{CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind},
	{CHANNELMSG_REQUESTOFFERS,		0, NULL},
	{CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered},
	{CHANNELMSG_OPENCHANNEL,		0, NULL},
	{CHANNELMSG_OPENCHANNEL_RESULT,		1, vmbus_onopen_result},
	{CHANNELMSG_CLOSECHANNEL,		0, NULL},
	{CHANNELMSG_GPADL_HEADER,		0, NULL},
	{CHANNELMSG_GPADL_BODY,			0, NULL},
	{CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created},
	{CHANNELMSG_GPADL_TEARDOWN,		0, NULL},
	{CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown},
	{CHANNELMSG_RELID_RELEASED,		0, NULL},
	{CHANNELMSG_INITIATE_CONTACT,		0, NULL},
	{CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response},
	{CHANNELMSG_UNLOAD,			0, NULL},
	{CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response},
};

/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
	struct hv_message *msg = context;
	struct vmbus_channel_message_header *hdr;
	int size;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
	size = msg->header.payload_size;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		pr_err("Received invalid channel message type %d size %d\n",
		       hdr->msgtype, size);
		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
				     (unsigned char *)msg->u.payload, size);
		return;
	}

	if (channel_message_table[hdr->msgtype].message_handler)
		channel_message_table[hdr->msgtype].message_handler(hdr);
	else
		pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}

/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kmalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;

	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg,
			     sizeof(struct vmbus_channel_message_header));
	if (ret != 0) {
		pr_err("Unable to request offers - %d\n", ret);

		goto cleanup;
	}

cleanup:
	kfree(msginfo);

	return ret;
}

/*
 * Retrieve the (sub-)channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we try to
 * distribute the load equally amongst all available channels.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
	struct list_head *cur, *tmp;
	int cur_cpu;
	struct vmbus_channel *cur_channel;
	struct vmbus_channel *outgoing_channel = primary;
	int next_channel;
	int i = 1;

	if (list_empty(&primary->sc_list))
		return outgoing_channel;

	next_channel = primary->next_oc++;

	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

	cur_cpu = hv_context.vp_index[get_cpu()];
	put_cpu();
	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

		if (i == next_channel)
			return cur_channel;

		i++;
	}

	return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);

static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}
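
/*
 * vmbus_set_sc_create_callback - Register a callback that is invoked for
 * each sub-channel offered on @primary_channel.
 */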
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				  void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);

bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);