// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>

#include <asm/mshyperv.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"

struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

static struct acpi_device *hv_acpi_dev;

static struct completion probe_event;

static int hyperv_cpuhp_online;

static void *hv_panic_page;

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;
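/*
 * Returns non-zero when the panic should be reported through the crash
 * MSRs via hyperv_report_panic(): that is, when reporting of the panic
 * message via hyperv_report_panic_msg() is disabled or when the page
 * backing that message was never allocated.
 */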
static int hyperv_report_reg(void)
{
	return !sysctl_record_panic_msg || !hv_panic_page;
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	vmbus_initiate_unload(true);

	/*
	 * Hyper-V should be notified only once about a panic. If we will be
	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
	 * the notification here.
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
	    && hyperv_report_reg()) {
		regs = current_pt_regs();
		hyperv_report_panic(regs, val, false);
	}
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = (struct die_args *)args;
	struct pt_regs *regs = die->regs;

	/*
	 * Hyper-V should be notified only once about a panic. If we will be
	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
	 * the notification here.
	 */
	if (hyperv_report_reg())
		hyperv_report_panic(regs, val, true);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}

static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}
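/*
 * For example, a channel with monitorid 37 maps to trigger group
 * 37 / 32 = 1 and to bit offset 37 % 32 = 5 within that group's
 * 32-bit pending mask in the monitor page.
 */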
static u32 channel_pending(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sprintf(buf, "%d\n", hv_dev->channel->numa_node);
}
static DEVICE_ATTR_RO(numa_node);
#endif

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);
static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
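/*
 * Illustrative usage (not part of the driver): each ring-buffer counter
 * above is exposed as a read-only file in the device's sysfs directory,
 * which is named after the device's instance GUID, e.g.:
 *
 *	cat /sys/bus/vmbus/devices/<instance GUID>/in_read_bytes_avail
 */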
static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
			       channel->offermsg.child_relid,
			       channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				      buf_size - tot_written,
				      "%u:%u\n",
				      cur_sc->offermsg.child_relid,
				      cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = hv_dev->driver_override;
	if (strlen(driver_override)) {
		hv_dev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		hv_dev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);
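/*
 * Illustrative usage (not part of the driver): writing a driver name to
 * driver_override forces the device to bind only to that driver; writing
 * an empty string clears the override. For instance, a device could be
 * steered to the generic UIO driver with:
 *
 *	echo uio_hv_generic \
 *		> /sys/bus/vmbus/devices/<instance GUID>/driver_override
 */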
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	const struct hv_device *hv_dev = device_to_hv_device(dev);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!hv_dev->channel->offermsg.monitor_allocated &&
	    (attr == &dev_attr_monitor_id.attr ||
	     attr == &dev_attr_server_monitor_pending.attr ||
	     attr == &dev_attr_client_monitor_pending.attr ||
	     attr == &dev_attr_server_monitor_latency.attr ||
	     attr == &dev_attr_client_monitor_latency.attr ||
	     attr == &dev_attr_server_monitor_conn_id.attr ||
	     attr == &dev_attr_client_monitor_conn_id.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group vmbus_dev_group = {
	.attrs = vmbus_dev_attrs,
	.is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rule and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	const char *format = "MODALIAS=vmbus:%*phN";

	return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}
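/*
 * For example, the synthetic-network class GUID
 * f8615163-df3e-46c5-913f-f2d2f965ed0e is emitted with its bytes in memory
 * order, i.e. "MODALIAS=vmbus:635161f83edfc546913ff2d2f965ed0e", which udev
 * matches against the alias table of hv_netvsc.
 */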
static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !guid_is_null(&id->guid); id++)
		if (guid_equal(&id->guid, guid))
			return id;

	return NULL;
}

static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (guid_equal(&dynid->id.guid, guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return id;
}

static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
							struct hv_device *dev)
{
	const guid_t *guid = &dev->dev_type;
	const struct hv_vmbus_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	id = hv_vmbus_dynid_match(drv, guid);
	if (!id)
		id = hv_vmbus_dev_match(drv->id_table, guid);

	/* driver_override will always match, send a dummy id */
	if (!id && dev->driver_override)
		id = &vmbus_device_null;

	return id;
}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/*
 * new_id_store - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	if (hv_vmbus_dynid_match(drv, &guid))
		return -EEXIST;

	retval = vmbus_add_dynid(drv, &guid);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);

/*
 * remove_id_store - remove a device ID from this driver
 *
 * Removes a dynamic VMBus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct vmbus_dynid *dynid, *n;
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	retval = -ENODEV;
	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		struct hv_vmbus_device_id *id = &dynid->id;

		if (guid_equal(&id->guid, &guid)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);
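/*
 * Illustrative usage (not part of the driver): a GUID can be added to a
 * driver's dynamic ID list at run time, e.g. to make uio_hv_generic pick
 * up a device of the synthetic-network class:
 *
 *	echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
 *		> /sys/bus/vmbus/drivers/uio_hv_generic/new_id
 *
 * Writing the same GUID to remove_id drops it from the list again.
 */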
/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, hv_dev))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv = drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, dev);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);

	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}

#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->suspend)
		return -EOPNOTSUPP;

	return drv->suspend(dev);
}

/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->resume)
		return -EOPNOTSUPP;

	return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_debug_rm_dev_dir(hv_dev);

	mutex_lock(&vmbus_connection.channel_mutex);
	hv_process_channel_removal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);
	kfree(hv_dev);
}

/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */
static const struct dev_pm_ops vmbus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_suspend,
	.thaw_noirq	= vmbus_resume,
	.poweroff_noirq	= vmbus_suspend,
	.restore_noirq	= vmbus_resume,
};

/* The one and only one */
static struct bus_type hv_bus = {
	.name		= "vmbus",
	.match		= vmbus_match,
	.shutdown	= vmbus_shutdown,
	.remove		= vmbus_remove,
	.probe		= vmbus_probe,
	.uevent		= vmbus_uevent,
	.dev_groups	= vmbus_dev_groups,
	.drv_groups	= vmbus_drv_groups,
	.pm		= &vmbus_pm,
};
struct onmessage_work_context {
	struct work_struct work;
	struct {
		struct hv_message_header header;
		u8 payload[];
	} msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage((struct vmbus_channel_message_header *)
			&ctx->msg.payload);
	kfree(ctx);
}

void vmbus_on_msg_dpc(unsigned long data)
{
	struct hv_per_cpu_context *hv_cpu = (void *)data;
	void *page_addr = hv_cpu->synic_message_page;
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	const struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	u32 message_type = msg->header.message_type;

	/*
	 * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
	 * it is being used in 'struct vmbus_channel_message_header' definition
	 * which is supposed to match hypervisor ABI.
	 */
	BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));

	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;

	trace_vmbus_on_msg_dpc(hdr);

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
		goto msg_handled;
	}

	if (msg->header.payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
		WARN_ONCE(1, "payload size is too large (%d)\n",
			  msg->header.payload_size);
		goto msg_handled;
	}

	entry = &channel_message_table[hdr->msgtype];

	if (!entry->message_handler)
		goto msg_handled;

	if (msg->header.payload_size < entry->min_payload_len) {
		WARN_ONCE(1, "message too short: msgtype=%d len=%d\n",
			  hdr->msgtype, msg->header.payload_size);
		goto msg_handled;
	}

	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx) + msg->header.payload_size,
			      GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, msg, sizeof(msg->header) +
		       msg->header.payload_size);

		/*
		 * The host can generate a rescind message while we
		 * may still be handling the original offer. We deal with
		 * this condition by relying on the synchronization provided
		 * by offer_in_progress and by channel_mutex. See also the
		 * inline comments in vmbus_onoffer_rescind().
		 */
		switch (hdr->msgtype) {
		case CHANNELMSG_RESCIND_CHANNELOFFER:
			/*
			 * If we are handling the rescind message, schedule
			 * the work on the global work queue.
			 *
			 * The OFFER message and the RESCIND message should
			 * not be handled by the same serialized work queue,
			 * because the OFFER handler may call vmbus_open(),
			 * which tries to open the channel by sending an
			 * OPEN_CHANNEL message to the host and waits for
			 * the host's response; however, if the host has
			 * rescinded the channel before it receives the
			 * OPEN_CHANNEL message, the host just silently
			 * ignores the OPEN_CHANNEL message; as a result,
			 * the guest's OFFER handler hangs forever, if we
			 * handle the RESCIND message in the same serialized
			 * work queue: the RESCIND handler can not start to
			 * run before the OFFER handler finishes.
			 */
			schedule_work(&ctx->work);
			break;

		case CHANNELMSG_OFFERCHANNEL:
			/*
			 * The host sends the offer message of a given channel
			 * before sending the rescind message of the same
			 * channel. These messages are sent to the guest's
			 * connect CPU; the guest then starts processing them
			 * in the tasklet handler on this CPU:
			 *
			 * VMBUS_CONNECT_CPU
			 *
			 * [vmbus_on_msg_dpc()]
			 * atomic_inc()  // CHANNELMSG_OFFERCHANNEL
			 * queue_work()
			 * ...
			 * [vmbus_on_msg_dpc()]
			 * schedule_work()  // CHANNELMSG_RESCIND_CHANNELOFFER
			 *
			 * We rely on the memory-ordering properties of the
			 * queue_work() and schedule_work() primitives, which
			 * guarantee that the atomic increment will be visible
			 * to the CPUs which will execute the offer & rescind
			 * works by the time these works will start execution.
			 */
			atomic_inc(&vmbus_connection.offer_in_progress);
			fallthrough;

		default:
			queue_work(vmbus_connection.work_queue, &ctx->work);
		}
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}
#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections can not persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
	struct onmessage_work_context *ctx;
	struct vmbus_channel_rescind_offer *rescind;

	WARN_ON(!is_hvsock_channel(channel));

	/*
	 * Allocation size is small and the allocation should really not fail,
	 * otherwise the state of the hv_sock connections ends up in limbo.
	 */
	ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
		      GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * So far, these are not really used by Linux. Just set them to the
	 * reasonable values conforming to the definitions of the fields.
	 */
	ctx->msg.header.message_type = 1;
	ctx->msg.header.payload_size = sizeof(*rescind);

	/* These values are actually used by Linux. */
	rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
	rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
	rescind->child_relid = channel->offermsg.child_relid;

	INIT_WORK(&ctx->work, vmbus_onmessage_work);

	queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */
/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	if (vmbus_proto_version < VERSION_WIN8) {
		maxbits = MAX_NUM_CHANNELS_SUPPORTED;
		recv_int_page = vmbus_connection.recv_int_page;
	} else {
		/*
		 * When the host is win8 and beyond, the event page
		 * can be directly checked to get the id of the channel
		 * that has the interrupt pending.
		 */
		void *page_addr = hv_cpu->synic_event_page;
		union hv_synic_event_flags *event
			= (union hv_synic_event_flags *)page_addr +
						 VMBUS_MESSAGE_SINT;

		maxbits = HV_EVENT_FLAGS_COUNT;
		recv_int_page = event->flags;
	}

	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		void (*callback_fn)(void *context);
		struct vmbus_channel *channel;

		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		/*
		 * Pairs with the kfree_rcu() in vmbus_chan_release().
		 * Guarantees that the channel data structure doesn't
		 * get freed while the channel pointer below is being
		 * dereferenced.
		 */
		rcu_read_lock();

		/* Find channel based on relid */
		channel = relid2channel(relid);
		if (channel == NULL)
			goto sched_unlock_rcu;

		if (channel->rescind)
			goto sched_unlock_rcu;

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer. Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section. See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock(&channel->sched_lock);

		callback_fn = channel->onchannel_callback;
		if (unlikely(callback_fn == NULL))
			goto sched_unlock;

		trace_vmbus_chan_sched(channel);

		++channel->interrupts;

		switch (channel->callback_mode) {
		case HV_CALL_ISR:
			(*callback_fn)(channel->channel_callback_context);
			break;

		case HV_CALL_BATCHED:
			hv_begin_read(&channel->inbound);
			fallthrough;
		case HV_CALL_DIRECT:
			tasklet_schedule(&channel->callback_event);
		}

sched_unlock:
		spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
		rcu_read_unlock();
	}
}
static void vmbus_isr(void)
{
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);
	void *page_addr = hv_cpu->synic_event_page;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	if (unlikely(page_addr == NULL))
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0, event->flags))
			handled = true;
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		vmbus_chan_sched(hv_cpu);

	page_addr = hv_cpu->synic_message_page;
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
			hv_stimer0_isr();
			vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
		} else
			tasklet_schedule(&hv_cpu->msg_dpc);
	}

	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}

/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	size_t bytes_written;
	phys_addr_t panic_pa;

	/* We are only interested in panics. */
	if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
		return;

	panic_pa = virt_to_phys(hv_panic_page);

	/*
	 * Write dump contents to the page. No need to synchronize; panic
	 * should be single-threaded.
	 */
	kmsg_dump_get_buffer(dumper, true, hv_panic_page, HV_HYP_PAGE_SIZE,
			     &bytes_written);
	if (bytes_written)
		hyperv_report_panic_msg(panic_pa, bytes_written);
}

static struct kmsg_dumper hv_kmsg_dumper = {
	.dump = hv_kmsg_dump,
};

static struct ctl_table_header *hv_ctl_table_hdr;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static struct ctl_table hv_ctl_table[] = {
	{
		.procname	= "hyperv_record_panic_msg",
		.data		= &sysctl_record_panic_msg,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE
	},
	{}
};

static struct ctl_table hv_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= hv_ctl_table
	},
	{}
};
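/*
 * Illustrative usage (not part of the driver): with the tables above
 * registered, panic-message reporting can be toggled at run time via
 *
 *	echo 0 > /proc/sys/kernel/hyperv_record_panic_msg
 */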
/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;

	/*
	 * Initialize the per-cpu interrupt state and stimer state.
	 * Then connect to the host.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				hv_synic_init, hv_synic_cleanup);
	if (ret < 0)
		goto err_cpuhp;
	hyperv_cpuhp_online = ret;

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		u64 hyperv_crash_ctl;
		/*
		 * Sysctl registration is not fatal, since by default
		 * reporting is enabled.
		 */
		hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
		if (!hv_ctl_table_hdr)
			pr_err("Hyper-V: sysctl table register error\n");

		/*
		 * Register for panic kmsg callback only if the right
		 * capability is supported by the hypervisor.
		 */
		hv_get_crash_ctl(hyperv_crash_ctl);
		if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
			hv_panic_page = (void *)hv_alloc_hyperv_zeroed_page();
			if (hv_panic_page) {
				ret = kmsg_dump_register(&hv_kmsg_dumper);
				if (ret) {
					pr_err("Hyper-V: kmsg dump register error 0x%x\n",
					       ret);
					hv_free_hyperv_page(
						(unsigned long)hv_panic_page);
					hv_panic_page = NULL;
				}
			} else
				pr_err("Hyper-V: panic message page memory allocation failed\n");
		}

		register_die_notifier(&hyperv_die_block);
	}

	/*
	 * Always register the panic notifier because we need to unload
	 * the VMbus channel connection to prevent any VMbus
	 * activity after the VM panics.
	 */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &hyperv_panic_block);

	vmbus_request_offers();

	return 0;

err_connect:
	cpuhp_remove_state(hyperv_cpuhp_online);
err_cpuhp:
	hv_synic_free();
err_alloc:
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	return ret;
}
/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *		un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);

/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
	struct vmbus_channel *channel
		= container_of(kobj, struct vmbus_channel, kobj);

	kfree_rcu(channel, rcu);
}
struct vmbus_chan_attribute {
	struct attribute attr;
	ssize_t (*show)(struct vmbus_channel *chan, char *buf);
	ssize_t (*store)(struct vmbus_channel *chan,
			 const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
	struct vmbus_chan_attribute chan_attr_##_name \
		= __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
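/*
 * For example, VMBUS_CHAN_ATTR_RO(out_mask) is equivalent to
 *
 *	struct vmbus_chan_attribute chan_attr_out_mask =
 *		__ATTR(out_mask, 0444, out_mask_show, NULL);
 *
 * pairing the sysfs file "out_mask" with the out_mask_show() method below.
 */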
static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(chan, buf);
}

static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(chan, buf, count);
}

static const struct sysfs_ops vmbus_chan_sysfs_ops = {
	.show = vmbus_chan_attr_show,
	.store = vmbus_chan_attr_store,
};

static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);

static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);

static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);

static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);

static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%u\n", channel->target_cpu);
}

static ssize_t target_cpu_store(struct vmbus_channel *channel,
				const char *buf, size_t count)
{
	u32 target_cpu, origin_cpu;
	ssize_t ret = count;

	if (vmbus_proto_version < VERSION_WIN10_V4_1)
		return -EIO;

	if (sscanf(buf, "%u", &target_cpu) != 1)
		return -EIO;

	/* Validate target_cpu for the cpumask_test_cpu() operation below. */
	if (target_cpu >= nr_cpumask_bits)
		return -EINVAL;

	/* No CPUs should come up or down during this. */
	cpus_read_lock();

	if (!cpumask_test_cpu(target_cpu, cpu_online_mask)) {
		cpus_read_unlock();
		return -EINVAL;
	}

	/*
	 * Synchronizes target_cpu_store() and channel closure:
	 *
	 * { Initially: state = CHANNEL_OPENED }
	 *
	 * CPU1				CPU2
	 *
	 * [target_cpu_store()]		[vmbus_disconnect_ring()]
	 *
	 * LOCK channel_mutex		LOCK channel_mutex
	 * LOAD r1 = state		LOAD r2 = state
	 * IF (r1 == CHANNEL_OPENED)	IF (r2 == CHANNEL_OPENED)
	 *   SEND MODIFYCHANNEL		  STORE state = CHANNEL_OPEN
	 *   [...]			  SEND CLOSECHANNEL
	 * UNLOCK channel_mutex		UNLOCK channel_mutex
	 *
	 * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
	 *		CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
	 *
	 * Note. The host processes the channel messages "sequentially", in
	 * the order in which they are received on a per-partition basis.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
	 * avoid sending the message and fail here for such channels.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	origin_cpu = channel->target_cpu;
	if (target_cpu == origin_cpu)
		goto cpu_store_unlock;

	if (vmbus_send_modifychannel(channel->offermsg.child_relid,
				     hv_cpu_number_to_vp_number(target_cpu))) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	/*
	 * Warning. At this point, there is *no* guarantee that the host will
	 * have successfully processed the vmbus_send_modifychannel() request.
	 * See the header comment of vmbus_send_modifychannel() for more info.
	 *
	 * Lags in the processing of the above vmbus_send_modifychannel() can
	 * result in missed interrupts if the "old" target CPU is taken offline
	 * before Hyper-V starts sending interrupts to the "new" target CPU.
	 * But apart from this offlining scenario, the code tolerates such
	 * lags. It will function correctly even if a channel interrupt comes
	 * in on a CPU that is different from the channel target_cpu value.
	 */

	channel->target_cpu = target_cpu;
	channel->target_vp = hv_cpu_number_to_vp_number(target_cpu);
	channel->numa_node = cpu_to_node(target_cpu);

	/* See init_vp_index(). */
	if (hv_is_perf_channel(channel))
		hv_update_alloced_cpus(origin_cpu, target_cpu);

	/* Currently set only for storvsc channels. */
	if (channel->change_target_cpu_callback) {
		(*channel->change_target_cpu_callback)(channel,
				origin_cpu, target_cpu);
	}

cpu_store_unlock:
	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();
	return ret;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
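/*
 * Illustrative usage (not part of the driver): the target CPU of an open
 * channel can be changed through the per-channel sysfs directory, e.g.
 * moving the (hypothetical) relid 15 of a device to CPU 3:
 *
 *	echo 3 > /sys/bus/vmbus/devices/<instance GUID>/channels/15/cpu
 */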
static ssize_t channel_pending_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_pending(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);

static ssize_t channel_latency_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_latency(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);

static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);

static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);

static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
					 char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_in_full);
}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);

static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_out_empty);
}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);

static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_first);
}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);

static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_total);
}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);

static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
					  char *buf)
{
	return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, S_IRUGO, subchannel_monitor_id_show, NULL);

static ssize_t subchannel_id_show(struct vmbus_channel *channel,
				  char *buf)
{
	return sprintf(buf, "%u\n",
		       channel->offermsg.offer.sub_channel_index);
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);

static struct attribute *vmbus_chan_attrs[] = {
	&chan_attr_out_mask.attr,
	&chan_attr_in_mask.attr,
	&chan_attr_read_avail.attr,
	&chan_attr_write_avail.attr,
	&chan_attr_cpu.attr,
	&chan_attr_pending.attr,
	&chan_attr_latency.attr,
	&chan_attr_interrupts.attr,
	&chan_attr_events.attr,
	&chan_attr_intr_in_full.attr,
	&chan_attr_intr_out_empty.attr,
	&chan_attr_out_full_first.attr,
	&chan_attr_out_full_total.attr,
	&chan_attr_monitor_id.attr,
	&chan_attr_subchannel_id.attr,
	NULL
};
/*
 * Channel-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
					  struct attribute *attr, int idx)
{
	const struct vmbus_channel *channel =
		container_of(kobj, struct vmbus_channel, kobj);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!channel->offermsg.monitor_allocated &&
	    (attr == &chan_attr_pending.attr ||
	     attr == &chan_attr_latency.attr ||
	     attr == &chan_attr_monitor_id.attr))
		return 0;

	return attr->mode;
}

static struct attribute_group vmbus_chan_group = {
	.attrs = vmbus_chan_attrs,
	.is_visible = vmbus_chan_attr_is_visible
};

static struct kobj_type vmbus_chan_ktype = {
	.sysfs_ops = &vmbus_chan_sysfs_ops,
	.release = vmbus_chan_release,
};

/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
	const struct device *device = &dev->device;
	struct kobject *kobj = &channel->kobj;
	u32 relid = channel->offermsg.child_relid;
	int ret;

	kobj->kset = dev->channels_kset;
	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
				   "%u", relid);
	if (ret)
		return ret;

	ret = sysfs_create_group(kobj, &vmbus_chan_group);

	if (ret) {
		/*
		 * The calling functions' error handling paths will clean up
		 * the empty channel directory.
		 */
		dev_err(device, "Unable to set up channel sysfs files\n");
		return ret;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return 0;
}

/*
 * vmbus_remove_channel_attr_group - remove the channel's attribute group
 */
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
{
	sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
}
/*
 * vmbus_device_create - Creates a new child device object for the
 * given channel. Registration with the driver core happens separately,
 * in vmbus_device_register().
 */
struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	guid_copy(&child_device_obj->dev_type, type);
	guid_copy(&child_device_obj->dev_instance, instance);
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	struct kobject *kobj = &child_device_obj->device.kobj;
	int ret;

	ret = dev_set_name(&child_device_obj->device, "%pUl",
			   &child_device_obj->channel->offermsg.offer.if_instance);
	if (ret)
		return ret;

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);
	if (ret) {
		pr_err("Unable to register child device\n");
		/*
		 * device_register() takes a reference even on failure;
		 * drop it with put_device() instead of freeing the
		 * object directly.
		 */
		put_device(&child_device_obj->device);
		return ret;
	}

	child_device_obj->channels_kset = kset_create_and_add("channels",
							      NULL, kobj);
	if (!child_device_obj->channels_kset) {
		ret = -ENOMEM;
		goto err_dev_unregister;
	}

	ret = vmbus_add_channel_kobj(child_device_obj,
				     child_device_obj->channel);
	if (ret) {
		pr_err("Unable to register primary channel\n");
		goto err_kset_unregister;
	}
	hv_debug_add_dev_dir(child_device_obj);

	return 0;

err_kset_unregister:
	kset_unregister(child_device_obj->channels_kset);

err_dev_unregister:
	device_unregister(&child_device_obj->device);
	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	kset_unregister(device_obj->channels_kset);

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}
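
/*
 * Illustrative use of the helpers above (a sketch; the real caller
 * lives in the channel-offer path of the channel management code):
 *
 *	struct hv_device *dev;
 *
 *	dev = vmbus_device_create(&offermsg->offer.if_type,
 *				  &offermsg->offer.if_instance, newchannel);
 *	if (!dev)
 *		return -ENOMEM;
 *	if (vmbus_device_register(dev) != 0)
 *		return -ENODEV;
 *
 * On registration failure every error path above drops its device
 * reference, so "dev" must not be touched again; on success the device
 * is eventually removed with vmbus_device_unregister(dev).
 */
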
/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If the new range is adjacent to an existing one, merge them;
	 * otherwise link it into the sibling list in ascending address
	 * order.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}
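
/*
 * Worked example of the list manipulation above (addresses made up for
 * illustration): if the list currently holds
 *
 *	[0xf8000000-0xfbffffff] -> [0xfe000000-0xfeffffff]
 *
 * a new range [0xfc000000-0xfdffffff] is merged into the first node,
 * because its start equals the first node's end + 1, giving
 *
 *	[0xf8000000-0xfdffffff] -> [0xfe000000-0xfeffffff]
 *
 * (note the loop breaks after one merge and does not cascade into the
 * now-adjacent second node), while a non-adjacent range below
 * 0xf8000000 would instead be linked in as a new head node.
 */
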
static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}

static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB. The length seems to
	 * be underreported, particularly in a Generation 1 VM. So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */

	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}

/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplies a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	mutex_lock(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region. Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {

		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	mutex_unlock(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);

/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	mutex_lock(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	mutex_unlock(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
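
/*
 * Illustrative pairing of vmbus_allocate_mmio()/vmbus_free_mmio()
 * (a sketch loosely modeled on a framebuffer-style consumer; "hdev"
 * and the size/alignment values are examples only):
 *
 *	struct resource *res;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&res, hdev, 0, -1,
 *				  0x800000, 0x100000, true);
 *	if (ret)
 *		return ret;
 *	...
 *	vmbus_free_mmio(res->start, resource_size(res));
 */
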
static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
	struct vmbus_channel *channel, *sc;
	unsigned long flags;

	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here until the completion of any channel
		 * offers that are currently in progress.
		 */
		msleep(1);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!is_hvsock_channel(channel))
			continue;

		vmbus_force_channel_rescinded(channel);
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	/*
	 * Wait until all the sub-channels and hv_sock channels have been
	 * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
	 * they would conflict with the new sub-channels that will be created
	 * in the resume path. hv_sock channels should also be destroyed, but
	 * a hv_sock channel of an established hv_sock connection cannot
	 * really be destroyed since it may still be referenced by the
	 * userspace application, so we just force the hv_sock channel to be
	 * rescinded by vmbus_force_channel_rescinded(), and the userspace
	 * application will thoroughly destroy the channel after hibernation.
	 *
	 * Note: the counter nr_chan_close_on_suspend may never go above 0 if
	 * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
	 */
	if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
		wait_for_completion(&vmbus_connection.ready_for_suspend_event);

	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0);

	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		/*
		 * Remove the channel from the array of channels and invalidate
		 * the channel's relid.  Upon resume, vmbus_onoffer() will fix
		 * up the relid (and other fields, if necessary) and add the
		 * channel back to the array.
		 */
		vmbus_channel_unmap_relid(channel);
		channel->offermsg.child_relid = INVALID_RELID;

		if (is_hvsock_channel(channel)) {
			if (!channel->rescind) {
				pr_err("hv_sock channel not rescinded!\n");
				WARN_ON_ONCE(1);
			}
			continue;
		}

		spin_lock_irqsave(&channel->lock, flags);
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			pr_err("Sub-channel not deleted!\n");
			WARN_ON_ONCE(1);
		}
		spin_unlock_irqrestore(&channel->lock, flags);

		atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	vmbus_initiate_unload(false);

	/* Reset the event for the next resume. */
	reinit_completion(&vmbus_connection.ready_for_resume_event);

	return 0;
}
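
/*
 * Completion pairing, for reference (a summary of the intended
 * protocol; the signaling side lives in the channel management code):
 * vmbus_bus_suspend() above counts each primary channel whose relid it
 * invalidates into nr_chan_fixup_on_resume, and vmbus_bus_resume()
 * below blocks on ready_for_resume_event, which is expected to be
 * completed by the offer-processing code once the re-offered channels
 * have drained that counter back to zero.
 */
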
static int vmbus_bus_resume(struct device *dev)
{
	struct vmbus_channel_msginfo *msginfo;
	size_t msgsize;
	int ret;

	/*
	 * We only use the 'vmbus_proto_version', which was in use before
	 * hibernation, to re-negotiate with the host.
	 */
	if (!vmbus_proto_version) {
		pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
		return -EINVAL;
	}

	msgsize = sizeof(*msginfo) +
		  sizeof(struct vmbus_channel_initiate_contact);

	msginfo = kzalloc(msgsize, GFP_KERNEL);

	if (msginfo == NULL)
		return -ENOMEM;

	ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);

	kfree(msginfo);

	if (ret != 0)
		return ret;

	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);

	vmbus_request_offers();

	wait_for_completion(&vmbus_connection.ready_for_resume_event);

	/* Reset the event for the next suspend. */
	reinit_completion(&vmbus_connection.ready_for_suspend_event);

	return 0;
}
#else
#define vmbus_bus_suspend NULL
#define vmbus_bus_resume NULL
#endif /* CONFIG_PM_SLEEP */

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

/*
 * Note: we must use the "noirq" ops, otherwise hibernation cannot work with
 * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
 * the resume path, the pci "noirq" restore op runs before the "non-noirq"
 * ops (see resume_target_kernel() -> dpm_resume_start(), and
 * hibernation_restore() -> dpm_resume_end()). This means vmbus_bus_resume()
 * and pci-hyperv's resume callback must also run via the "noirq" ops.
 *
 * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
 * earlier in this file before vmbus_pm.
 */

static const struct dev_pm_ops vmbus_bus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_bus_suspend,
	.thaw_noirq	= vmbus_bus_resume,
	.poweroff_noirq	= vmbus_bus_suspend,
	.restore_noirq	= vmbus_bus_resume
};
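
/*
 * For reference, the resulting hibernation flow (a sketch derived from
 * the ops table above):
 *
 *	freeze:   .freeze_noirq   -> vmbus_bus_suspend()
 *	poweroff: .poweroff_noirq -> vmbus_bus_suspend()
 *	thaw:     .thaw_noirq     -> vmbus_bus_resume()
 *	restore:  .restore_noirq  -> vmbus_bus_resume()
 *
 * Suspend-to-Idle deliberately maps to no callback at all, since
 * .suspend_noirq/.resume_noirq are NULL.
 */
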
2542 * If we're in a legacy configuration where stimer Direct Mode is 2543 * not enabled, the stimers on the non-boot CPUs have been unbound 2544 * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() -> 2545 * hv_stimer_cleanup() -> clockevents_unbind_device(). 2546 * 2547 * hv_synic_suspend() only runs on CPU0 with interrupts disabled. 2548 * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because: 2549 * 1) it's unnecessary as interrupts remain disabled between 2550 * syscore_suspend() and syscore_resume(): see create_image() and 2551 * resume_target_kernel() 2552 * 2) the stimer on CPU0 is automatically disabled later by 2553 * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ... 2554 * -> clockevents_shutdown() -> ... -> hv_ce_shutdown() 2555 * 3) a warning would be triggered if we call 2556 * clockevents_unbind_device(), which may sleep, in an 2557 * interrupts-disabled context. 2558 */ 2559 2560 hv_synic_disable_regs(0); 2561 2562 return 0; 2563 } 2564 2565 static void hv_synic_resume(void) 2566 { 2567 hv_synic_enable_regs(0); 2568 2569 /* 2570 * Note: we don't need to call hv_stimer_init(0), because the timer 2571 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is 2572 * automatically re-enabled in timekeeping_resume(). 2573 */ 2574 } 2575 2576 /* The callbacks run only on CPU0, with irqs_disabled. */ 2577 static struct syscore_ops hv_synic_syscore_ops = { 2578 .suspend = hv_synic_suspend, 2579 .resume = hv_synic_resume, 2580 }; 2581 2582 static int __init hv_acpi_init(void) 2583 { 2584 int ret, t; 2585 2586 if (!hv_is_hyperv_initialized()) 2587 return -ENODEV; 2588 2589 init_completion(&probe_event); 2590 2591 /* 2592 * Get ACPI resources first. 2593 */ 2594 ret = acpi_bus_register_driver(&vmbus_acpi_driver); 2595 2596 if (ret) 2597 return ret; 2598 2599 t = wait_for_completion_timeout(&probe_event, 5*HZ); 2600 if (t == 0) { 2601 ret = -ETIMEDOUT; 2602 goto cleanup; 2603 } 2604 hv_debug_init(); 2605 2606 ret = vmbus_bus_init(); 2607 if (ret) 2608 goto cleanup; 2609 2610 hv_setup_kexec_handler(hv_kexec_handler); 2611 hv_setup_crash_handler(hv_crash_handler); 2612 2613 register_syscore_ops(&hv_synic_syscore_ops); 2614 2615 return 0; 2616 2617 cleanup: 2618 acpi_bus_unregister_driver(&vmbus_acpi_driver); 2619 hv_acpi_dev = NULL; 2620 return ret; 2621 } 2622 2623 static void __exit vmbus_exit(void) 2624 { 2625 int cpu; 2626 2627 unregister_syscore_ops(&hv_synic_syscore_ops); 2628 2629 hv_remove_kexec_handler(); 2630 hv_remove_crash_handler(); 2631 vmbus_connection.conn_state = DISCONNECTED; 2632 hv_stimer_global_cleanup(); 2633 vmbus_disconnect(); 2634 hv_remove_vmbus_irq(); 2635 for_each_online_cpu(cpu) { 2636 struct hv_per_cpu_context *hv_cpu 2637 = per_cpu_ptr(hv_context.cpu_context, cpu); 2638 2639 tasklet_kill(&hv_cpu->msg_dpc); 2640 } 2641 hv_debug_rm_all_dir(); 2642 2643 vmbus_free_channels(); 2644 kfree(vmbus_connection.channels); 2645 2646 if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { 2647 kmsg_dump_unregister(&hv_kmsg_dumper); 2648 unregister_die_notifier(&hyperv_die_block); 2649 atomic_notifier_chain_unregister(&panic_notifier_list, 2650 &hyperv_panic_block); 2651 } 2652 2653 free_page((unsigned long)hv_panic_page); 2654 unregister_sysctl_table(hv_ctl_table_hdr); 2655 hv_ctl_table_hdr = NULL; 2656 bus_unregister(&hv_bus); 2657 2658 cpuhp_remove_state(hyperv_cpuhp_online); 2659 hv_synic_free(); 2660 acpi_bus_unregister_driver(&vmbus_acpi_driver); 2661 } 2662 2663 2664 MODULE_LICENSE("GPL"); 2665 
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);