/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include "hyperv_vmbus.h"

static struct acpi_device *hv_acpi_dev;

static struct completion probe_event;

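/*
 * Panic/die notification: when the guest crashes, the register state is
 * handed to Hyper-V through the HV_X64_MSR_CRASH_P* MSRs so the host can
 * record it. hyperv_report_panic() below does the actual MSR writes and
 * is shared by the die and panic notifier callbacks.
 */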
static void hyperv_report_panic(struct pt_regs *regs)
{
	static bool panic_reported;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	regs = current_pt_regs();

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = (struct die_args *)args;
	struct pt_regs *regs = die->regs;

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

struct resource *hyperv_mmio;

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}

#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;
	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}

static u8 channel_monitor_group(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(hv_dev, alias_name);
	return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

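/*
 * The out_* and in_* attributes that follow expose per-channel ring buffer
 * state (interrupt mask, read/write indices, bytes available) for the
 * outbound and inbound rings, as reported by hv_ringbuffer_get_debuginfo().
 */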
static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
			       channel->offermsg.child_relid,
			       channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				      buf_size - tot_written,
				      "%u:%u\n",
				      cur_sc->offermsg.child_relid,
				      cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

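/*
 * Illustrative example (paths and values depend on the guest's actual
 * channel layout): the attributes above can be read directly from
 * userspace, e.g.
 *
 *	$ cat /sys/bus/vmbus/devices/vmbus_1/class_id
 *	{f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */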
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rules and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	int ret;
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(dev, alias_name);
	ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
	return ret;
}

static const uuid_le null_guid;

static inline bool is_null_guid(const uuid_le *guid)
{
	if (uuid_le_cmp(*guid, null_guid))
		return false;
	return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
					const struct hv_vmbus_device_id *id,
					const uuid_le *guid)
{
	for (; !is_null_guid(&id->guid); id++)
		if (!uuid_le_cmp(id->guid, *guid))
			return id;

	return NULL;
}

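/*
 * Illustrative example (following the pattern used by in-tree VMBus
 * drivers such as netvsc): a client driver matches on its interface GUID
 * by supplying a NULL-GUID-terminated table that hv_vmbus_get_id() walks:
 *
 *	static const struct hv_vmbus_device_id id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 */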
/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv->id_table, &dev->dev_type);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);

	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_process_channel_removal(channel,
				   channel->offermsg.child_relid);
	kfree(hv_dev);
}

/* The one and only one */
static struct bus_type hv_bus = {
	.name =		"vmbus",
	.match =	vmbus_match,
	.shutdown =	vmbus_shutdown,
	.remove =	vmbus_remove,
	.probe =	vmbus_probe,
	.uevent =	vmbus_uevent,
	.dev_groups =	vmbus_groups,
};

struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage(&ctx->msg);
	kfree(ctx);
}

static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
{
	struct clock_event_device *dev = hv_context.clk_evt[cpu];

	if (dev->event_handler)
		dev->event_handler(dev);

	vmbus_signal_eom(msg);
}

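/*
 * vmbus_on_msg_dpc - tasklet handler that drains this CPU's SynIC message
 * slot. Channel messages whose handlers may block are copied and deferred
 * to the connection work queue; the rest are handled inline, after which
 * vmbus_signal_eom() tells the hypervisor the message slot is free again.
 */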
void vmbus_on_msg_dpc(unsigned long data)
{
	int cpu = smp_processor_id();
	void *page_addr = hv_context.synic_message_page[cpu];
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;

	if (msg->header.message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
		goto msg_handled;
	}

	entry = &channel_message_table[hdr->msgtype];
	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, msg, sizeof(*msg));

		queue_work(vmbus_connection.work_queue, &ctx->work);
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg);
}

/*
 * vmbus_isr - top-level VMBus interrupt handler: schedules the event and
 * message tasklets for the CPU the interrupt arrived on.
 */
static void vmbus_isr(void)
{
	int cpu = smp_processor_id();
	void *page_addr;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	page_addr = hv_context.synic_event_page[cpu];
	if (page_addr == NULL)
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0,
			(unsigned long *) &event->flags32[0])) {
			handled = true;
		}
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		tasklet_schedule(hv_context.event_dpc[cpu]);

	page_addr = hv_context.synic_message_page[cpu];
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
			hv_process_timer_expiration(msg, cpu);
		else
			tasklet_schedule(hv_context.msg_dpc[cpu]);
	}
}

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	/* Hypervisor initialization: set up the hypercall page, etc. */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		goto err_cleanup;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;
	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
	 */
	on_each_cpu(hv_synic_init, NULL, 1);
	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_disable();

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		register_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_block);
	}

	vmbus_request_offers();

	return 0;

err_connect:
	on_each_cpu(hv_synic_cleanup, NULL, 1);
err_alloc:
	hv_synic_free();
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);

err_cleanup:
	hv_cleanup();

	return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);

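/*
 * Illustrative usage (names hypothetical): drivers normally call the
 * vmbus_driver_register() wrapper from <linux/hyperv.h>, which supplies
 * THIS_MODULE and KBUILD_MODNAME for them:
 *
 *	static struct hv_driver sample_drv = {
 *		.name = "sample",
 *		.id_table = id_table,
 *		.probe = sample_probe,
 *		.remove = sample_remove,
 *	};
 *
 *	static int __init sample_init(void)
 *	{
 *		return vmbus_driver_register(&sample_drv);
 *	}
 */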
/**
 * vmbus_driver_unregister() - Unregister a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists())
		driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);

/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	int ret = 0;

	dev_set_name(&child_device_obj->device, "vmbus_%d",
		     child_device_obj->channel->id);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);

	if (ret)
		pr_err("Unable to register child device\n");
	else
		pr_debug("child device %s registered\n",
			 dev_name(&child_device_obj->device));

	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}

/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * Stick ranges from higher in address space at the front of the list.
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->end < new_res->start) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}

static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}

/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplies a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter;
	resource_size_t range_min, range_max, start, local_min, local_max;
	const char *dev_n = dev_name(&device_obj->device);
	u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
	int i;

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;

		/* If this range overlaps the frame buffer, split it into
		   two tries. */
		for (i = 0; i < 2; i++) {
			local_min = range_min;
			local_max = range_max;
			if (fb_overlap_ok || (range_min >= fb_end) ||
			    (range_max <= screen_info.lfb_base)) {
				i++;
			} else {
				if ((range_min <= screen_info.lfb_base) &&
				    (range_max >= screen_info.lfb_base)) {
					/*
					 * The frame buffer is in this window,
					 * so trim this into the part that
					 * precedes the frame buffer.
					 */
					local_max = screen_info.lfb_base - 1;
					range_min = fb_end;
				} else {
					range_min = fb_end;
					continue;
				}
			}

			start = (local_min + align - 1) & ~(align - 1);
			for (; start + size - 1 <= local_max; start += align) {
				*new = request_mem_region_exclusive(start, size,
								    dev_n);
				if (*new)
					return 0;
			}
		}
	}

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);

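/*
 * Illustrative usage (sizes and flags hypothetical), modelled on how the
 * Hyper-V framebuffer driver carves its aperture out of the VMBus MMIO
 * pool:
 *
 *	struct resource *fb_mmio;
 *
 *	if (vmbus_allocate_mmio(&fb_mmio, hdev, 0, -1,
 *				fb_size, 0x100000, true))
 *		return -ENOMEM;
 */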
/**
 * vmbus_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls and such that talk about specific
 * processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
int vmbus_cpu_number_to_vp_number(int cpu_number)
{
	return hv_context.vp_index[cpu_number];
}
EXPORT_SYMBOL_GPL(vmbus_cpu_number_to_vp_number);

static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
					vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio)
			break;
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
};

/*
 * Tear down VMBus state on kexec so the next kernel starts from a clean
 * slate: stop the per-cpu clockevents, send an unload request to the host
 * and clean up the SynIC on every online CPU.
 */
static void hv_kexec_handler(void)
{
	int cpu;

	hv_synic_clockevents_cleanup();
	vmbus_initiate_unload(false);
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	hv_cleanup();
}

static void hv_crash_handler(struct pt_regs *regs)
{
	vmbus_initiate_unload(true);
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	hv_synic_cleanup(NULL);
	hv_cleanup();
}

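/*
 * Module entry point: register the ACPI driver, wait for vmbus_acpi_add()
 * to discover the VMBus device and its MMIO ranges, then bring the bus up
 * via vmbus_bus_init() and hook the kexec/crash handlers.
 */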
1284 */ 1285 ret = acpi_bus_register_driver(&vmbus_acpi_driver); 1286 1287 if (ret) 1288 return ret; 1289 1290 t = wait_for_completion_timeout(&probe_event, 5*HZ); 1291 if (t == 0) { 1292 ret = -ETIMEDOUT; 1293 goto cleanup; 1294 } 1295 1296 ret = vmbus_bus_init(); 1297 if (ret) 1298 goto cleanup; 1299 1300 hv_setup_kexec_handler(hv_kexec_handler); 1301 hv_setup_crash_handler(hv_crash_handler); 1302 1303 return 0; 1304 1305 cleanup: 1306 acpi_bus_unregister_driver(&vmbus_acpi_driver); 1307 hv_acpi_dev = NULL; 1308 return ret; 1309 } 1310 1311 static void __exit vmbus_exit(void) 1312 { 1313 int cpu; 1314 1315 hv_remove_kexec_handler(); 1316 hv_remove_crash_handler(); 1317 vmbus_connection.conn_state = DISCONNECTED; 1318 hv_synic_clockevents_cleanup(); 1319 vmbus_disconnect(); 1320 hv_remove_vmbus_irq(); 1321 for_each_online_cpu(cpu) 1322 tasklet_kill(hv_context.msg_dpc[cpu]); 1323 vmbus_free_channels(); 1324 if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { 1325 unregister_die_notifier(&hyperv_die_block); 1326 atomic_notifier_chain_unregister(&panic_notifier_list, 1327 &hyperv_panic_block); 1328 } 1329 bus_unregister(&hv_bus); 1330 hv_cleanup(); 1331 for_each_online_cpu(cpu) { 1332 tasklet_kill(hv_context.event_dpc[cpu]); 1333 smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1); 1334 } 1335 hv_synic_free(); 1336 acpi_bus_unregister_driver(&vmbus_acpi_driver); 1337 if (vmbus_proto_version > VERSION_WIN7) 1338 cpu_hotplug_enable(); 1339 } 1340 1341 1342 MODULE_LICENSE("GPL"); 1343 1344 subsys_initcall(hv_acpi_init); 1345 module_exit(vmbus_exit); 1346