// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>

#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/dma-map-ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"

struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

static struct acpi_device  *hv_acpi_dev;

static struct completion probe_event;

static int hyperv_cpuhp_online;

static void *hv_panic_page;

static long __percpu *vmbus_evt;

/* Values parsed from ACPI DSDT */
int vmbus_irq;
int vmbus_interrupt;

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;

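/*
 * Illustrative usage (not part of the driver): with the sysctl registered
 * further below (see hv_ctl_table), an administrator could disable the
 * reporting at runtime with, e.g.:
 *
 *	echo 0 > /proc/sys/kernel/hyperv_record_panic_msg
 */
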
static int hyperv_report_reg(void)
{
	return !sysctl_record_panic_msg || !hv_panic_page;
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	vmbus_initiate_unload(true);

	/*
	 * Hyper-V should be notified only once about a panic.  If
	 * hv_kmsg_dump() is going to report the kmsg data later, don't do
	 * the notification here.
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
	    && hyperv_report_reg()) {
		regs = current_pt_regs();
		hyperv_report_panic(regs, val, false);
	}
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = args;
	struct pt_regs *regs = die->regs;

	/* Don't notify Hyper-V if the die event is other than oops */
	if (val != DIE_OOPS)
		return NOTIFY_DONE;

	/*
	 * Hyper-V should be notified only once about a panic.  If
	 * hv_kmsg_dump() is going to report the kmsg data later, don't do
	 * the notification here.
	 */
	if (hyperv_report_reg())
		hyperv_report_panic(regs, val, true);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}

static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}

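/*
 * Worked example (illustrative): a channel with offermsg.monitorid == 37
 * belongs to monitor group 37 / 32 == 1 at offset 37 % 32 == 5, i.e. bit 5
 * of monitor_page->trigger_group[1] and entry [1][5] of the latency and
 * parameter arrays used below.
 */
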
static u32 channel_pending(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	mutex_lock(&vmbus_connection.channel_mutex);

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
		channel->offermsg.child_relid, channel->target_cpu);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				     buf_size - tot_written,
				     "%u:%u\n",
				     cur_sc->offermsg.child_relid,
				     cur_sc->target_cpu);
		tot_written += n_written;
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);

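/*
 * Example output of channel_vp_mapping (illustrative, relids and CPUs are
 * made up): for a primary channel with relid 15 on CPU 0 and one
 * sub-channel with relid 16 on CPU 2, reading the attribute returns:
 *
 *	15:0
 *	16:2
 */
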
static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = hv_dev->driver_override;
	if (strlen(driver_override)) {
		hv_dev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		hv_dev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);

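/*
 * Illustrative usage (driver names are examples only): to force a device to
 * bind to uio_hv_generic instead of its default driver, userspace could do:
 *
 *	echo uio_hv_generic > /sys/bus/vmbus/devices/<guid>/driver_override
 *	echo <guid> > /sys/bus/vmbus/drivers/hv_netvsc/unbind
 *	echo <guid> > /sys/bus/vmbus/drivers/uio_hv_generic/bind
 */
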
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	const struct hv_device *hv_dev = device_to_hv_device(dev);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!hv_dev->channel->offermsg.monitor_allocated &&
	    (attr == &dev_attr_monitor_id.attr ||
	     attr == &dev_attr_server_monitor_pending.attr ||
	     attr == &dev_attr_client_monitor_pending.attr ||
	     attr == &dev_attr_server_monitor_latency.attr ||
	     attr == &dev_attr_client_monitor_latency.attr ||
	     attr == &dev_attr_server_monitor_conn_id.attr ||
	     attr == &dev_attr_client_monitor_conn_id.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group vmbus_dev_group = {
	.attrs = vmbus_dev_attrs,
	.is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);

/* Set up the attribute for /sys/bus/vmbus/hibernation */
static ssize_t hibernation_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
}

static BUS_ATTR_RO(hibernation);

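/*
 * Illustrative usage: "cat /sys/bus/vmbus/hibernation" prints 1 when
 * hv_is_hibernation_supported() reports true, and 0 otherwise.
 */
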
static struct attribute *vmbus_bus_attrs[] = {
	&bus_attr_hibernation.attr,
	NULL,
};
static const struct attribute_group vmbus_bus_group = {
	.attrs = vmbus_bus_attrs,
};
__ATTRIBUTE_GROUPS(vmbus_bus);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in userspace. udev will then look at its rules
 * and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	const char *format = "MODALIAS=vmbus:%*phN";

	return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}

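/*
 * Example (illustrative): for the synthetic NIC class GUID
 * f8615163-df3e-46c5-913f-f2d2f965ed0e, the GUID bytes are emitted in their
 * in-memory (mixed-endian) order, two hex characters per byte:
 *
 *	MODALIAS=vmbus:635161f83edfc546913ff2d2f965ed0e
 */
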
static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !guid_is_null(&id->guid); id++)
		if (guid_equal(&id->guid, guid))
			return id;

	return NULL;
}

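/*
 * A minimal id_table sketch (illustrative): a driver terminates its table
 * with a zeroed entry, which the guid_is_null() test above detects.
 *
 *	static const struct hv_vmbus_device_id id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 */
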
static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (guid_equal(&dynid->id.guid, guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return id;
}

static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
							struct hv_device *dev)
{
	const guid_t *guid = &dev->dev_type;
	const struct hv_vmbus_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	id = hv_vmbus_dynid_match(drv, guid);
	if (!id)
		id = hv_vmbus_dev_match(drv->id_table, guid);

	/* driver_override will always match, send a dummy id */
	if (!id && dev->driver_override)
		id = &vmbus_device_null;

	return id;
}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/*
 * new_id_store - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	if (hv_vmbus_dynid_match(drv, &guid))
		return -EEXIST;

	retval = vmbus_add_dynid(drv, &guid);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);

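/*
 * Illustrative usage (the GUID is an example only): a GUID in standard
 * textual form, as accepted by guid_parse(), can be added via sysfs:
 *
 *	echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" > \
 *		/sys/bus/vmbus/drivers/<driver>/new_id
 *
 * remove_id (defined below) accepts the same format.
 */
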
/*
 * remove_id_store - remove a vmbus device ID from this driver
 *
 * Removes a dynamic vmbus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct vmbus_dynid *dynid, *n;
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	retval = -ENODEV;
	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		struct hv_vmbus_device_id *id = &dynid->id;

		if (guid_equal(&id->guid, &guid)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);

/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, hv_dev))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, dev);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);

	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_dma_configure -- Configure DMA coherence for VMbus device
 */
static int vmbus_dma_configure(struct device *child_device)
{
	/*
	 * On ARM64, propagate the DMA coherence setting from the top level
	 * VMbus ACPI device to the child VMbus device being added here.
	 * On x86/x64 coherence is assumed and these calls have no effect.
	 */
	hv_setup_dma_ops(child_device,
		device_get_dma_attr(&hv_acpi_dev->dev) == DEV_DMA_COHERENT);
	return 0;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static void vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}

#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->suspend)
		return -EOPNOTSUPP;

	return drv->suspend(dev);
}

/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->resume)
		return -EOPNOTSUPP;

	return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_debug_rm_dev_dir(hv_dev);

	mutex_lock(&vmbus_connection.channel_mutex);
	hv_process_channel_removal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);
	kfree(hv_dev);
}

/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */

static const struct dev_pm_ops vmbus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_suspend,
	.thaw_noirq	= vmbus_resume,
	.poweroff_noirq	= vmbus_suspend,
	.restore_noirq	= vmbus_resume,
};

/* The one and only one */
static struct bus_type hv_bus = {
	.name =		"vmbus",
	.match =		vmbus_match,
	.shutdown =		vmbus_shutdown,
	.remove =		vmbus_remove,
	.probe =		vmbus_probe,
	.uevent =		vmbus_uevent,
	.dma_configure =	vmbus_dma_configure,
	.dev_groups =		vmbus_dev_groups,
	.drv_groups =		vmbus_drv_groups,
	.bus_groups =		vmbus_bus_groups,
	.pm =			&vmbus_pm,
};

struct onmessage_work_context {
	struct work_struct work;
	struct {
		struct hv_message_header header;
		u8 payload[];
	} msg;
};

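/*
 * Note (illustrative): msg.payload is a flexible array member, so
 * vmbus_on_msg_dpc() below sizes each allocation with
 *
 *	ctx = kmalloc(struct_size(ctx, msg.payload, payload_size), GFP_ATOMIC);
 *
 * which accounts for the work item, the message header and a payload of
 * payload_size bytes.
 */
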
static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage((struct vmbus_channel_message_header *)
			&ctx->msg.payload);
	kfree(ctx);
}

void vmbus_on_msg_dpc(unsigned long data)
{
	struct hv_per_cpu_context *hv_cpu = (void *)data;
	void *page_addr = hv_cpu->synic_message_page;
	struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	enum vmbus_channel_message_type msgtype;
	const struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	__u8 payload_size;
	u32 message_type;

	/*
	 * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
	 * it is being used in 'struct vmbus_channel_message_header' definition
	 * which is supposed to match hypervisor ABI.
	 */
	BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));

	/*
	 * Since the message is in memory shared with the host, an erroneous or
	 * malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
	 * or individual message handlers are executing; to prevent this, copy
	 * the message into private memory.
	 */
	memcpy(&msg_copy, msg, sizeof(struct hv_message));

	message_type = msg_copy.header.message_type;
	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
	msgtype = hdr->msgtype;

	trace_vmbus_on_msg_dpc(hdr);

	if (msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
		goto msg_handled;
	}

	payload_size = msg_copy.header.payload_size;
	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
		WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
		goto msg_handled;
	}

	entry = &channel_message_table[msgtype];

	if (!entry->message_handler)
		goto msg_handled;

	if (payload_size < entry->min_payload_len) {
		WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
		goto msg_handled;
	}

	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(struct_size(ctx, msg.payload, payload_size), GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, &msg_copy, sizeof(msg->header) + payload_size);

		/*
		 * The host can generate a rescind message while we
		 * may still be handling the original offer. We deal with
		 * this condition by relying on the synchronization provided
		 * by offer_in_progress and by channel_mutex.  See also the
		 * inline comments in vmbus_onoffer_rescind().
		 */
		switch (msgtype) {
		case CHANNELMSG_RESCIND_CHANNELOFFER:
			/*
			 * If we are handling the rescind message,
			 * schedule the work on the global work queue.
			 *
			 * The OFFER message and the RESCIND message should
			 * not be handled by the same serialized work queue,
			 * because the OFFER handler may call vmbus_open(),
			 * which tries to open the channel by sending an
			 * OPEN_CHANNEL message to the host and waits for
			 * the host's response; however, if the host has
			 * rescinded the channel before it receives the
			 * OPEN_CHANNEL message, the host just silently
			 * ignores the OPEN_CHANNEL message; as a result,
			 * the guest's OFFER handler hangs forever if we
			 * handle the RESCIND message in the same serialized
			 * work queue: the RESCIND handler cannot start
			 * running before the OFFER handler finishes.
			 */
			schedule_work(&ctx->work);
			break;

		case CHANNELMSG_OFFERCHANNEL:
			/*
			 * The host sends the offer message of a given channel
			 * before sending the rescind message of the same
			 * channel.  These messages are sent to the guest's
			 * connect CPU; the guest then starts processing them
			 * in the tasklet handler on this CPU:
			 *
			 * VMBUS_CONNECT_CPU
			 *
			 * [vmbus_on_msg_dpc()]
			 * atomic_inc()  // CHANNELMSG_OFFERCHANNEL
			 * queue_work()
			 * ...
			 * [vmbus_on_msg_dpc()]
			 * schedule_work()  // CHANNELMSG_RESCIND_CHANNELOFFER
			 *
			 * We rely on the memory-ordering properties of the
			 * queue_work() and schedule_work() primitives, which
			 * guarantee that the atomic increment will be visible
			 * to the CPUs which will execute the offer & rescind
			 * works by the time these works will start execution.
			 */
			atomic_inc(&vmbus_connection.offer_in_progress);
			fallthrough;

		default:
			queue_work(vmbus_connection.work_queue, &ctx->work);
		}
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}

#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections cannot persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
	struct onmessage_work_context *ctx;
	struct vmbus_channel_rescind_offer *rescind;

	WARN_ON(!is_hvsock_channel(channel));

	/*
	 * Allocation size is small and the allocation should really not fail,
	 * otherwise the state of the hv_sock connections ends up in limbo.
	 */
	ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
		      GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * So far, these are not really used by Linux. Just set them to
	 * reasonable values conforming to the definitions of the fields.
	 */
	ctx->msg.header.message_type = 1;
	ctx->msg.header.payload_size = sizeof(*rescind);

	/* These values are actually used by Linux. */
	rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
	rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
	rescind->child_relid = channel->offermsg.child_relid;

	INIT_WORK(&ctx->work, vmbus_onmessage_work);

	queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */

/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	/*
	 * The event page can be directly checked to get the id of
	 * the channel that has the interrupt pending.
	 */
	void *page_addr = hv_cpu->synic_event_page;
	union hv_synic_event_flags *event
		= (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;

	maxbits = HV_EVENT_FLAGS_COUNT;
	recv_int_page = event->flags;

	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		void (*callback_fn)(void *context);
		struct vmbus_channel *channel;

		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		/*
		 * Pairs with the kfree_rcu() in vmbus_chan_release().
		 * Guarantees that the channel data structure doesn't
		 * get freed while the channel pointer below is being
		 * dereferenced.
		 */
		rcu_read_lock();

		/* Find channel based on relid */
		channel = relid2channel(relid);
		if (channel == NULL)
			goto sched_unlock_rcu;

		if (channel->rescind)
			goto sched_unlock_rcu;

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer.  Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section.  See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock(&channel->sched_lock);

		callback_fn = channel->onchannel_callback;
		if (unlikely(callback_fn == NULL))
			goto sched_unlock;

		trace_vmbus_chan_sched(channel);

		++channel->interrupts;

		switch (channel->callback_mode) {
		case HV_CALL_ISR:
			(*callback_fn)(channel->channel_callback_context);
			break;

		case HV_CALL_BATCHED:
			hv_begin_read(&channel->inbound);
			fallthrough;
		case HV_CALL_DIRECT:
			tasklet_schedule(&channel->callback_event);
		}

sched_unlock:
		spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
		rcu_read_unlock();
	}
}

static void vmbus_isr(void)
{
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);
	void *page_addr;
	struct hv_message *msg;

	vmbus_chan_sched(hv_cpu);

	page_addr = hv_cpu->synic_message_page;
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
			hv_stimer0_isr();
			vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
		} else
			tasklet_schedule(&hv_cpu->msg_dpc);
	}

	add_interrupt_randomness(vmbus_interrupt);
}

static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
{
	vmbus_isr();
	return IRQ_HANDLED;
}

/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	size_t bytes_written;

	/* We are only interested in panics. */
	if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
		return;

	/*
	 * Write dump contents to the page. No need to synchronize; panic should
	 * be single-threaded.
	 */
	kmsg_dump_rewind(&iter);
	kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
			     &bytes_written);
	if (!bytes_written)
		return;
	/*
	 * Set P3 to the physical address of the panic page and P4 to the
	 * size of the panic data in that page. The rest of the registers
	 * are no-ops when the NOTIFY_MSG flag is set.
	 */
	hv_set_register(HV_REGISTER_CRASH_P0, 0);
	hv_set_register(HV_REGISTER_CRASH_P1, 0);
	hv_set_register(HV_REGISTER_CRASH_P2, 0);
	hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page));
	hv_set_register(HV_REGISTER_CRASH_P4, bytes_written);

	/*
	 * Let Hyper-V know there is crash data available along with
	 * the panic message.
	 */
	hv_set_register(HV_REGISTER_CRASH_CTL,
	       (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}

static struct kmsg_dumper hv_kmsg_dumper = {
	.dump = hv_kmsg_dump,
};

static void hv_kmsg_dump_register(void)
{
	int ret;

	hv_panic_page = hv_alloc_hyperv_zeroed_page();
	if (!hv_panic_page) {
		pr_err("Hyper-V: panic message page memory allocation failed\n");
		return;
	}

	ret = kmsg_dump_register(&hv_kmsg_dumper);
	if (ret) {
		pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
		hv_free_hyperv_page((unsigned long)hv_panic_page);
		hv_panic_page = NULL;
	}
}

static struct ctl_table_header *hv_ctl_table_hdr;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static struct ctl_table hv_ctl_table[] = {
	{
		.procname       = "hyperv_record_panic_msg",
		.data           = &sysctl_record_panic_msg,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE
	},
	{}
};

static struct ctl_table hv_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= hv_ctl_table
	},
	{}
};

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	/*
	 * VMbus interrupts are best modeled as per-cpu interrupts. If
	 * on an architecture with support for per-cpu IRQs (e.g. ARM64),
	 * allocate a per-cpu IRQ using standard Linux kernel functionality.
	 * If not on such an architecture (e.g., x86/x64), then rely on
	 * code in the arch-specific portion of the code tree to connect
	 * the VMbus interrupt handler.
	 */

	if (vmbus_irq == -1) {
		hv_setup_vmbus_handler(vmbus_isr);
	} else {
		vmbus_evt = alloc_percpu(long);
		ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
				"Hyper-V VMbus", vmbus_evt);
		if (ret) {
			pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
					vmbus_irq, ret);
			free_percpu(vmbus_evt);
			goto err_setup;
		}
	}

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;

	/*
	 * Initialize the per-cpu interrupt state and stimer state.
	 * Then connect to the host.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				hv_synic_init, hv_synic_cleanup);
	if (ret < 0)
		goto err_cpuhp;
	hyperv_cpuhp_online = ret;

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	if (hv_is_isolation_supported())
		sysctl_record_panic_msg = 0;

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		u64 hyperv_crash_ctl;
		/*
		 * Panic message recording (sysctl_record_panic_msg)
		 * is enabled by default in non-isolated guests and
		 * disabled by default in isolated guests; the panic
		 * message recording won't be available in isolated
		 * guests should the following registration fail.
		 */
		hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
		if (!hv_ctl_table_hdr)
			pr_err("Hyper-V: sysctl table register error");

		/*
		 * Register for panic kmsg callback only if the right
		 * capability is supported by the hypervisor.
		 */
		hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
		if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
			hv_kmsg_dump_register();

		register_die_notifier(&hyperv_die_block);
	}

	/*
	 * Always register the panic notifier because we need to unload
	 * the VMbus channel connection to prevent any VMbus
	 * activity after the VM panics.
	 */
	atomic_notifier_chain_register(&panic_notifier_list,
			       &hyperv_panic_block);

	vmbus_request_offers();

	return 0;

err_connect:
	cpuhp_remove_state(hyperv_cpuhp_online);
err_cpuhp:
	hv_synic_free();
err_alloc:
	if (vmbus_irq == -1) {
		hv_remove_vmbus_handler();
	} else {
		free_percpu_irq(vmbus_irq, vmbus_evt);
		free_percpu(vmbus_evt);
	}
err_setup:
	bus_unregister(&hv_bus);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);

/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
	struct vmbus_channel *channel
		= container_of(kobj, struct vmbus_channel, kobj);

	kfree_rcu(channel, rcu);
}

struct vmbus_chan_attribute {
	struct attribute attr;
	ssize_t (*show)(struct vmbus_channel *chan, char *buf);
	ssize_t (*store)(struct vmbus_channel *chan,
			 const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
	struct vmbus_chan_attribute chan_attr_##_name \
		= __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)

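/*
 * Example expansion (illustrative): VMBUS_CHAN_ATTR_RO(out_mask) becomes
 *
 *	struct vmbus_chan_attribute chan_attr_out_mask = __ATTR_RO(out_mask);
 *
 * which wires the attribute to out_mask_show() through the sysfs ops below.
 */
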
static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(chan, buf);
}

static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(chan, buf, count);
}

static const struct sysfs_ops vmbus_chan_sysfs_ops = {
	.show = vmbus_chan_attr_show,
	.store = vmbus_chan_attr_store,
};

static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);

static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);

static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);

static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);

static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%u\n", channel->target_cpu);
}
static ssize_t target_cpu_store(struct vmbus_channel *channel,
				const char *buf, size_t count)
{
	u32 target_cpu, origin_cpu;
	ssize_t ret = count;

	if (vmbus_proto_version < VERSION_WIN10_V4_1)
		return -EIO;

	if (sscanf(buf, "%u", &target_cpu) != 1)
		return -EIO;

	/* Validate target_cpu for the cpumask_test_cpu() operation below. */
	if (target_cpu >= nr_cpumask_bits)
		return -EINVAL;

	/* No CPUs should come up or down during this. */
	cpus_read_lock();

	if (!cpu_online(target_cpu)) {
		cpus_read_unlock();
		return -EINVAL;
	}

	/*
	 * Synchronizes target_cpu_store() and channel closure:
	 *
	 * { Initially: state = CHANNEL_OPENED }
	 *
	 * CPU1				CPU2
	 *
	 * [target_cpu_store()]		[vmbus_disconnect_ring()]
	 *
	 * LOCK channel_mutex		LOCK channel_mutex
	 * LOAD r1 = state		LOAD r2 = state
	 * IF (r1 == CHANNEL_OPENED)	IF (r2 == CHANNEL_OPENED)
	 *   SEND MODIFYCHANNEL		  STORE state = CHANNEL_OPEN
	 *   [...]			  SEND CLOSECHANNEL
	 * UNLOCK channel_mutex		UNLOCK channel_mutex
	 *
	 * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
	 * 		CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
	 *
	 * Note.  The host processes the channel messages "sequentially", in
	 * the order in which they are received on a per-partition basis.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
	 * avoid sending the message and fail here for such channels.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	origin_cpu = channel->target_cpu;
	if (target_cpu == origin_cpu)
		goto cpu_store_unlock;

	if (vmbus_send_modifychannel(channel,
				     hv_cpu_number_to_vp_number(target_cpu))) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	/*
	 * For versions before VERSION_WIN10_V5_3, the following warning holds:
1846 	 *
1847 	 * Warning.  At this point, there is *no* guarantee that the host will
1848 	 * have successfully processed the vmbus_send_modifychannel() request.
1849 	 * See the header comment of vmbus_send_modifychannel() for more info.
1850 	 *
1851 	 * Lags in the processing of the above vmbus_send_modifychannel() can
1852 	 * result in missed interrupts if the "old" target CPU is taken offline
1853 	 * before Hyper-V starts sending interrupts to the "new" target CPU.
1854 	 * But apart from this offlining scenario, the code tolerates such
1855 	 * lags.  It will function correctly even if a channel interrupt comes
1856 	 * in on a CPU that is different from the channel target_cpu value.
1857 	 */
1858 
1859 	channel->target_cpu = target_cpu;
1860 
1861 	/* See init_vp_index(). */
1862 	if (hv_is_perf_channel(channel))
1863 		hv_update_allocated_cpus(origin_cpu, target_cpu);
1864 
1865 	/* Currently set only for storvsc channels. */
1866 	if (channel->change_target_cpu_callback) {
1867 		(*channel->change_target_cpu_callback)(channel,
1868 				origin_cpu, target_cpu);
1869 	}
1870 
1871 cpu_store_unlock:
1872 	mutex_unlock(&vmbus_connection.channel_mutex);
1873 	cpus_read_unlock();
1874 	return ret;
1875 }
1876 static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
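/*
 * Usage sketch (paths and the CPU number are illustrative): since the
 * "cpu" attribute is writable (mode 0644), a channel's interrupt
 * targeting can be changed from user space, subject to the checks in
 * target_cpu_store() above (host version is WIN10_V4_1 or later, the
 * target CPU is online, and the channel is in CHANNEL_OPENED_STATE):
 *
 *	$ echo 2 > /sys/bus/vmbus/devices/<device-guid>/channels/<relid>/cpu
 *	$ cat /sys/bus/vmbus/devices/<device-guid>/channels/<relid>/cpu
 *	2
 */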
1877 
1878 static ssize_t channel_pending_show(struct vmbus_channel *channel,
1879 				    char *buf)
1880 {
1881 	return sprintf(buf, "%d\n",
1882 		       channel_pending(channel,
1883 				       vmbus_connection.monitor_pages[1]));
1884 }
1885 static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);
1886 
1887 static ssize_t channel_latency_show(struct vmbus_channel *channel,
1888 				    char *buf)
1889 {
1890 	return sprintf(buf, "%d\n",
1891 		       channel_latency(channel,
1892 				       vmbus_connection.monitor_pages[1]));
1893 }
1894 static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);
1895 
1896 static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
1897 {
1898 	return sprintf(buf, "%llu\n", channel->interrupts);
1899 }
1900 static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);
1901 
1902 static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
1903 {
1904 	return sprintf(buf, "%llu\n", channel->sig_events);
1905 }
1906 static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);
1907 
1908 static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
1909 					 char *buf)
1910 {
1911 	return sprintf(buf, "%llu\n",
1912 		       (unsigned long long)channel->intr_in_full);
1913 }
1914 static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);
1915 
1916 static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
1917 					   char *buf)
1918 {
1919 	return sprintf(buf, "%llu\n",
1920 		       (unsigned long long)channel->intr_out_empty);
1921 }
1922 static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);
1923 
1924 static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
1925 					   char *buf)
1926 {
1927 	return sprintf(buf, "%llu\n",
1928 		       (unsigned long long)channel->out_full_first);
1929 }
1930 static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);
1931 
1932 static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
1933 					   char *buf)
1934 {
1935 	return sprintf(buf, "%llu\n",
1936 		       (unsigned long long)channel->out_full_total);
1937 }
1938 static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
1939 
1940 static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
1941 					  char *buf)
1942 {
1943 	return sprintf(buf, "%u\n", channel->offermsg.monitorid);
1944 }
1945 static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);
1946 
1947 static ssize_t subchannel_id_show(struct vmbus_channel *channel,
1948 				  char *buf)
1949 {
1950 	return sprintf(buf, "%u\n",
1951 		       channel->offermsg.offer.sub_channel_index);
1952 }
1953 static VMBUS_CHAN_ATTR_RO(subchannel_id);
1954 
1955 static struct attribute *vmbus_chan_attrs[] = {
1956 	&chan_attr_out_mask.attr,
1957 	&chan_attr_in_mask.attr,
1958 	&chan_attr_read_avail.attr,
1959 	&chan_attr_write_avail.attr,
1960 	&chan_attr_cpu.attr,
1961 	&chan_attr_pending.attr,
1962 	&chan_attr_latency.attr,
1963 	&chan_attr_interrupts.attr,
1964 	&chan_attr_events.attr,
1965 	&chan_attr_intr_in_full.attr,
1966 	&chan_attr_intr_out_empty.attr,
1967 	&chan_attr_out_full_first.attr,
1968 	&chan_attr_out_full_total.attr,
1969 	&chan_attr_monitor_id.attr,
1970 	&chan_attr_subchannel_id.attr,
1971 	NULL
1972 };
1973 
1974 /*
1975  * Channel-level attribute_group callback function. Returns the permission for
1976  * each attribute, and returns 0 if an attribute is not visible.
1977  */
1978 static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
1979 					  struct attribute *attr, int idx)
1980 {
1981 	const struct vmbus_channel *channel =
1982 		container_of(kobj, struct vmbus_channel, kobj);
1983 
1984 	/* Hide the monitor attributes if the monitor mechanism is not used. */
1985 	if (!channel->offermsg.monitor_allocated &&
1986 	    (attr == &chan_attr_pending.attr ||
1987 	     attr == &chan_attr_latency.attr ||
1988 	     attr == &chan_attr_monitor_id.attr))
1989 		return 0;
1990 
1991 	return attr->mode;
1992 }
1993 
1994 static struct attribute_group vmbus_chan_group = {
1995 	.attrs = vmbus_chan_attrs,
1996 	.is_visible = vmbus_chan_attr_is_visible
1997 };
1998 
1999 static struct kobj_type vmbus_chan_ktype = {
2000 	.sysfs_ops = &vmbus_chan_sysfs_ops,
2001 	.release = vmbus_chan_release,
2002 };
2003 
2004 /*
2005  * vmbus_add_channel_kobj - setup a sub-directory under device/channels
2006  */
2007 int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
2008 {
2009 	const struct device *device = &dev->device;
2010 	struct kobject *kobj = &channel->kobj;
2011 	u32 relid = channel->offermsg.child_relid;
2012 	int ret;
2013 
2014 	kobj->kset = dev->channels_kset;
2015 	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
2016 				   "%u", relid);
2017 	if (ret) {
2018 		kobject_put(kobj);
2019 		return ret;
2020 	}
2021 
2022 	ret = sysfs_create_group(kobj, &vmbus_chan_group);
2023 
2024 	if (ret) {
2025 		/*
2026 		 * The calling functions' error handling paths will clean up the
2027 		 * empty channel directory.
2028 		 */
2029 		kobject_put(kobj);
2030 		dev_err(device, "Unable to set up channel sysfs files\n");
2031 		return ret;
2032 	}
2033 
2034 	kobject_uevent(kobj, KOBJ_ADD);
2035 
2036 	return 0;
2037 }
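/*
 * Note on the error paths above: once kobject_init_and_add() has run,
 * the kobject's reference must be dropped with kobject_put() even on
 * failure (this is the documented kobject contract); freeing the
 * containing channel directly would bypass vmbus_chan_release().
 */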
2038 
2039 /*
2040  * vmbus_remove_channel_attr_group - remove the channel's attribute group
2041  */
2042 void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
2043 {
2044 	sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
2045 }
2046 
2047 /*
2048  * vmbus_device_create - Creates and registers a new child device
2049  * on the vmbus.
2050  */
2051 struct hv_device *vmbus_device_create(const guid_t *type,
2052 				      const guid_t *instance,
2053 				      struct vmbus_channel *channel)
2054 {
2055 	struct hv_device *child_device_obj;
2056 
2057 	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
2058 	if (!child_device_obj) {
2059 		pr_err("Unable to allocate device object for child device\n");
2060 		return NULL;
2061 	}
2062 
2063 	child_device_obj->channel = channel;
2064 	guid_copy(&child_device_obj->dev_type, type);
2065 	guid_copy(&child_device_obj->dev_instance, instance);
2066 	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
2067 
2068 	return child_device_obj;
2069 }
2070 
2071 /*
2072  * vmbus_device_register - Register the child device
2073  */
2074 int vmbus_device_register(struct hv_device *child_device_obj)
2075 {
2076 	struct kobject *kobj = &child_device_obj->device.kobj;
2077 	int ret;
2078 
2079 	dev_set_name(&child_device_obj->device, "%pUl",
2080 		     &child_device_obj->channel->offermsg.offer.if_instance);
2081 
2082 	child_device_obj->device.bus = &hv_bus;
2083 	child_device_obj->device.parent = &hv_acpi_dev->dev;
2084 	child_device_obj->device.release = vmbus_device_release;
2085 
2086 	child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
2087 	child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
2088 	dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
2089 
2090 	/*
2091 	 * Register with the LDM. This will kick off the driver/device
2092 	 * binding...which will eventually call vmbus_match() and vmbus_probe()
2093 	 */
2094 	ret = device_register(&child_device_obj->device);
2095 	if (ret) {
2096 		pr_err("Unable to register child device\n");
2097 		/* device_register() took a reference; drop it on failure. */
2098 		put_device(&child_device_obj->device);
2099 		return ret;
2100 	}
2099 
2100 	child_device_obj->channels_kset = kset_create_and_add("channels",
2101 							      NULL, kobj);
2102 	if (!child_device_obj->channels_kset) {
2103 		ret = -ENOMEM;
2104 		goto err_dev_unregister;
2105 	}
2106 
2107 	ret = vmbus_add_channel_kobj(child_device_obj,
2108 				     child_device_obj->channel);
2109 	if (ret) {
2110 		pr_err("Unable to register primary channel\n");
2111 		goto err_kset_unregister;
2112 	}
2113 	hv_debug_add_dev_dir(child_device_obj);
2114 
2115 	return 0;
2116 
2117 err_kset_unregister:
2118 	kset_unregister(child_device_obj->channels_kset);
2119 
2120 err_dev_unregister:
2121 	device_unregister(&child_device_obj->device);
2122 	return ret;
2123 }
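/*
 * Sketch of the expected create/register pairing (the real caller is in
 * channel_mgmt.c; the GUID names are placeholders and error handling is
 * abbreviated):
 *
 *	struct hv_device *dev;
 *
 *	dev = vmbus_device_create(&type_guid, &instance_guid, channel);
 *	if (!dev)
 *		return -ENOMEM;
 *	if (vmbus_device_register(dev) != 0)
 *		return -ENODEV;	// dev is released via put_device() above
 */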
2124 
2125 /*
2126  * vmbus_device_unregister - Remove the specified child device
2127  * from the vmbus.
2128  */
2129 void vmbus_device_unregister(struct hv_device *device_obj)
2130 {
2131 	pr_debug("child device %s unregistered\n",
2132 		dev_name(&device_obj->device));
2133 
2134 	kset_unregister(device_obj->channels_kset);
2135 
2136 	/*
2137 	 * Kick off the process of unregistering the device.
2138 	 * This will call vmbus_remove() and eventually vmbus_device_release()
2139 	 */
2140 	device_unregister(&device_obj->device);
2141 }
2142 
2143 
2144 /*
2145  * VMBus is an ACPI-enumerated device. Get the information we
2146  * need from the DSDT.
2147  */
2148 #define VTPM_BASE_ADDRESS 0xfed40000
2149 static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
2150 {
2151 	resource_size_t start = 0;
2152 	resource_size_t end = 0;
2153 	struct resource *new_res;
2154 	struct resource **old_res = &hyperv_mmio;
2155 	struct resource **prev_res = NULL;
2156 	struct resource r;
2157 
2158 	switch (res->type) {
2159 
2160 	/*
2161 	 * "Address" descriptors are for bus windows. Ignore
2162 	 * "memory" descriptors, which are for registers on
2163 	 * devices.
2164 	 */
2165 	case ACPI_RESOURCE_TYPE_ADDRESS32:
2166 		start = res->data.address32.address.minimum;
2167 		end = res->data.address32.address.maximum;
2168 		break;
2169 
2170 	case ACPI_RESOURCE_TYPE_ADDRESS64:
2171 		start = res->data.address64.address.minimum;
2172 		end = res->data.address64.address.maximum;
2173 		break;
2174 
2175 	/*
2176 	 * The IRQ information is needed only on ARM64, which Hyper-V
2177 	 * sets up in the extended format. IRQ information is present
2178 	 * on x86/x64 in the non-extended format but it is not used by
2179 	 * Linux. So don't bother checking for the non-extended format.
2180 	 */
2181 	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
2182 		if (!acpi_dev_resource_interrupt(res, 0, &r)) {
2183 			pr_err("Unable to parse Hyper-V ACPI interrupt\n");
2184 			return AE_ERROR;
2185 		}
2186 		/* ARM64 INTID for VMbus */
2187 		vmbus_interrupt = res->data.extended_irq.interrupts[0];
2188 		/* Linux IRQ number */
2189 		vmbus_irq = r.start;
2190 		return AE_OK;
2191 
2192 	default:
2193 		/* Unused resource type */
2194 		return AE_OK;
2195 
2196 	}
2197 	/*
2198 	 * Ignore ranges that are below 1MB, as they're not
2199 	 * necessary or useful here.
2200 	 */
2201 	if (end < 0x100000)
2202 		return AE_OK;
2203 
2204 	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
2205 	if (!new_res)
2206 		return AE_NO_MEMORY;
2207 
2208 	/* If this range overlaps the virtual TPM, truncate it. */
2209 	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
2210 		end = VTPM_BASE_ADDRESS;
2211 
2212 	new_res->name = "hyperv mmio";
2213 	new_res->flags = IORESOURCE_MEM;
2214 	new_res->start = start;
2215 	new_res->end = end;
2216 
2217 	/*
2218 	 * If two ranges are adjacent, merge them.
2219 	 */
2220 	do {
2221 		if (!*old_res) {
2222 			*old_res = new_res;
2223 			break;
2224 		}
2225 
2226 		if (((*old_res)->end + 1) == new_res->start) {
2227 			(*old_res)->end = new_res->end;
2228 			kfree(new_res);
2229 			break;
2230 		}
2231 
2232 		if ((*old_res)->start == new_res->end + 1) {
2233 			(*old_res)->start = new_res->start;
2234 			kfree(new_res);
2235 			break;
2236 		}
2237 
2238 		if ((*old_res)->start > new_res->end) {
2239 			new_res->sibling = *old_res;
2240 			if (prev_res)
2241 				(*prev_res)->sibling = new_res;
2242 			*old_res = new_res;
2243 			break;
2244 		}
2245 
2246 		prev_res = old_res;
2247 		old_res = &(*old_res)->sibling;
2248 
2249 	} while (1);
2250 
2251 	return AE_OK;
2252 }
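/*
 * Worked example of the list maintenance above (addresses illustrative):
 * with an existing entry [0xF8000000-0xFBFFFFFF], a new descriptor
 * [0xFC000000-0xFDFFFFFF] satisfies "(*old_res)->end + 1 == new_res->start",
 * so the entry simply grows to [0xF8000000-0xFDFFFFFF] and new_res is
 * freed.  A disjoint lower range such as [0x40000000-0x7FFFFFFF] instead
 * takes the "(*old_res)->start > new_res->end" branch and is linked in
 * ahead of the existing entry, keeping the sibling list sorted by address.
 */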
2253 
2254 static int vmbus_acpi_remove(struct acpi_device *device)
2255 {
2256 	struct resource *cur_res;
2257 	struct resource *next_res;
2258 
2259 	if (hyperv_mmio) {
2260 		if (fb_mmio) {
2261 			__release_region(hyperv_mmio, fb_mmio->start,
2262 					 resource_size(fb_mmio));
2263 			fb_mmio = NULL;
2264 		}
2265 
2266 		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
2267 			next_res = cur_res->sibling;
2268 			kfree(cur_res);
2269 		}
2270 	}
2271 
2272 	return 0;
2273 }
2274 
2275 static void vmbus_reserve_fb(void)
2276 {
2277 	int size;
2278 	/*
2279 	 * Make a claim for the frame buffer in the resource tree under the
2280 	 * first node, which will be the one below 4GB.  The length seems to
2281 	 * be underreported, particularly in a Generation 1 VM.  So start out
2282 	 * reserving a larger area and make it smaller until it succeeds.
2283 	 */
2284 
2285 	if (screen_info.lfb_base) {
2286 		if (efi_enabled(EFI_BOOT))
2287 			size = max_t(__u32, screen_info.lfb_size, 0x800000);
2288 		else
2289 			size = max_t(__u32, screen_info.lfb_size, 0x4000000);
2290 
2291 		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
2292 			fb_mmio = __request_region(hyperv_mmio,
2293 						   screen_info.lfb_base, size,
2294 						   fb_mmio_name, 0);
2295 		}
2296 	}
2297 }
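/*
 * Example of the shrinking reservation (sizes follow the constants above):
 * on a non-EFI (Generation 1) boot, the first attempt reserves
 * max(lfb_size, 64MB) at screen_info.lfb_base; each failed
 * __request_region() halves the size (32MB, 16MB, ...) until either a
 * reservation succeeds or the size drops below 1MB.
 */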
2298 
2299 /**
2300  * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
2301  * @new:		On success, receives a pointer to the
2302  *			allocated MMIO space.
2303  * @device_obj:		Identifies the caller
2304  * @min:		Minimum guest physical address of the
2305  *			allocation
2306  * @max:		Maximum guest physical address
2307  * @size:		Size of the range to be allocated
2308  * @align:		Alignment of the range to be allocated
2309  * @fb_overlap_ok:	Whether this allocation can be allowed
2310  *			to overlap the video frame buffer.
2311  *
2312  * This function walks the resources granted to VMBus by the
2313  * _CRS object in the ACPI namespace underneath the parent
2314  * "bridge" whether that's a root PCI bus in the Generation 1
2315  * case or a Module Device in the Generation 2 case.  It then
2316  * attempts to allocate from the global MMIO pool in a way that
2317  * matches the constraints supplied in these parameters and by
2318  * that _CRS.
2319  *
2320  * Return: 0 on success, -errno on failure
2321  */
2322 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
2323 			resource_size_t min, resource_size_t max,
2324 			resource_size_t size, resource_size_t align,
2325 			bool fb_overlap_ok)
2326 {
2327 	struct resource *iter, *shadow;
2328 	resource_size_t range_min, range_max, start;
2329 	const char *dev_n = dev_name(&device_obj->device);
2330 	int retval;
2331 
2332 	retval = -ENXIO;
2333 	mutex_lock(&hyperv_mmio_lock);
2334 
2335 	/*
2336 	 * If overlaps with frame buffers are allowed, then first attempt to
2337 	 * make the allocation from within the reserved region.  Because it
2338 	 * is already reserved, no shadow allocation is necessary.
2339 	 */
2340 	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
2341 	    !(max < fb_mmio->start)) {
2342 
2343 		range_min = fb_mmio->start;
2344 		range_max = fb_mmio->end;
2345 		start = (range_min + align - 1) & ~(align - 1);
2346 		for (; start + size - 1 <= range_max; start += align) {
2347 			*new = request_mem_region_exclusive(start, size, dev_n);
2348 			if (*new) {
2349 				retval = 0;
2350 				goto exit;
2351 			}
2352 		}
2353 	}
2354 
2355 	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2356 		if ((iter->start >= max) || (iter->end <= min))
2357 			continue;
2358 
2359 		range_min = iter->start;
2360 		range_max = iter->end;
2361 		start = (range_min + align - 1) & ~(align - 1);
2362 		for (; start + size - 1 <= range_max; start += align) {
2363 			shadow = __request_region(iter, start, size, NULL,
2364 						  IORESOURCE_BUSY);
2365 			if (!shadow)
2366 				continue;
2367 
2368 			*new = request_mem_region_exclusive(start, size, dev_n);
2369 			if (*new) {
2370 				shadow->name = (char *)*new;
2371 				retval = 0;
2372 				goto exit;
2373 			}
2374 
2375 			__release_region(iter, start, size);
2376 		}
2377 	}
2378 
2379 exit:
2380 	mutex_unlock(&hyperv_mmio_lock);
2381 	return retval;
2382 }
2383 EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
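/*
 * The candidate start address in the loops above rounds range_min up to
 * the requested power-of-two alignment:
 *
 *	start = (range_min + align - 1) & ~(align - 1);
 *
 * Worked example (values illustrative): with range_min = 0xF8001000 and
 * align = 0x100000 (1MB), 0xF8001000 + 0xFFFFF = 0xF8100FFF, and masking
 * off the low 20 bits yields 0xF8100000, the first 1MB-aligned candidate.
 */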
2384 
2385 /**
2386  * vmbus_free_mmio() - Free a memory-mapped I/O range.
2387  * @start:		Base address of region to release.
2388  * @size:		Size of the range to be released
2389  *
2390  * This function releases anything requested by
2391  * vmbus_allocate_mmio().
2392  */
2393 void vmbus_free_mmio(resource_size_t start, resource_size_t size)
2394 {
2395 	struct resource *iter;
2396 
2397 	mutex_lock(&hyperv_mmio_lock);
2398 	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2399 		if ((iter->start >= start + size) || (iter->end <= start))
2400 			continue;
2401 
2402 		__release_region(iter, start, size);
2403 	}
2404 	release_mem_region(start, size);
2405 	mutex_unlock(&hyperv_mmio_lock);
2406 
2407 }
2408 EXPORT_SYMBOL_GPL(vmbus_free_mmio);
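/*
 * Typical allocate/free pairing in a VMBus driver (a sketch; the size,
 * alignment, and "hdev" variable are illustrative):
 *
 *	struct resource *res;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&res, hdev, 0, -1, 0x100000, 0x100000,
 *				  false);
 *	if (ret)
 *		return ret;
 *	// ... map and use [res->start, res->end] ...
 *	vmbus_free_mmio(res->start, resource_size(res));
 */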
2409 
2410 static int vmbus_acpi_add(struct acpi_device *device)
2411 {
2412 	acpi_status result;
2413 	int ret_val = -ENODEV;
2414 	struct acpi_device *ancestor;
2415 
2416 	hv_acpi_dev = device;
2417 
2418 	/*
2419 	 * Older versions of Hyper-V for ARM64 fail to include the _CCA
2420 	 * method on the top level VMbus device in the DSDT. But devices
2421 	 * are hardware coherent in all current Hyper-V use cases, so fix
2422 	 * up the ACPI device to behave as if _CCA is present and indicates
2423 	 * hardware coherence.
2424 	 */
2425 	ACPI_COMPANION_SET(&device->dev, device);
2426 	if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
2427 	    device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
2428 		pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
2429 		device->flags.cca_seen = true;
2430 		device->flags.coherent_dma = true;
2431 	}
2432 
2433 	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
2434 					vmbus_walk_resources, NULL);
2435 
2436 	if (ACPI_FAILURE(result))
2437 		goto acpi_walk_err;
2438 	/*
2439 	 * Some ancestor of the vmbus ACPI device (Gen1 or Gen2
2440 	 * firmware) is the VMOD that has the MMIO ranges. Get that.
2441 	 */
2442 	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
2443 		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
2444 					     vmbus_walk_resources, NULL);
2445 
2446 		if (ACPI_FAILURE(result))
2447 			continue;
2448 		if (hyperv_mmio) {
2449 			vmbus_reserve_fb();
2450 			break;
2451 		}
2452 	}
2453 	ret_val = 0;
2454 
2455 acpi_walk_err:
2456 	complete(&probe_event);
2457 	if (ret_val)
2458 		vmbus_acpi_remove(device);
2459 	return ret_val;
2460 }
2461 
2462 #ifdef CONFIG_PM_SLEEP
2463 static int vmbus_bus_suspend(struct device *dev)
2464 {
2465 	struct vmbus_channel *channel, *sc;
2466 
2467 	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
2468 		/*
2469 		 * We wait here until the completion of any channel
2470 		 * offers that are currently in progress.
2471 		 */
2472 		usleep_range(1000, 2000);
2473 	}
2474 
2475 	mutex_lock(&vmbus_connection.channel_mutex);
2476 	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2477 		if (!is_hvsock_channel(channel))
2478 			continue;
2479 
2480 		vmbus_force_channel_rescinded(channel);
2481 	}
2482 	mutex_unlock(&vmbus_connection.channel_mutex);
2483 
2484 	/*
2485 	 * Wait until all the sub-channels and hv_sock channels have been
2486 	 * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
2487 	 * they would conflict with the new sub-channels that will be created
2488 	 * in the resume path. hv_sock channels should also be destroyed, but
2489 	 * a hv_sock channel of an established hv_sock connection cannot
2490 	 * really be destroyed since it may still be referenced by the userspace
2491 	 * application, so we just force the hv_sock channel to be rescinded
2492 	 * by vmbus_force_channel_rescinded(), and the userspace application
2493 	 * will thoroughly destroy the channel after hibernation.
2494 	 *
2495 	 * Note: the counter nr_chan_close_on_suspend may never go above 0 if
2496 	 * the VM has no sub-channels or hv_sock channels, e.g. a 1-vCPU VM.
2497 	 */
2498 	if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
2499 		wait_for_completion(&vmbus_connection.ready_for_suspend_event);
2500 
2501 	if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
2502 		pr_err("Cannot suspend because a previous resume failed\n");
2503 		return -EBUSY;
2504 	}
2505 
2506 	mutex_lock(&vmbus_connection.channel_mutex);
2507 
2508 	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2509 		/*
2510 		 * Remove the channel from the array of channels and invalidate
2511 		 * the channel's relid.  Upon resume, vmbus_onoffer() will fix
2512 		 * up the relid (and other fields, if necessary) and add the
2513 		 * channel back to the array.
2514 		 */
2515 		vmbus_channel_unmap_relid(channel);
2516 		channel->offermsg.child_relid = INVALID_RELID;
2517 
2518 		if (is_hvsock_channel(channel)) {
2519 			if (!channel->rescind) {
2520 				pr_err("hv_sock channel not rescinded!\n");
2521 				WARN_ON_ONCE(1);
2522 			}
2523 			continue;
2524 		}
2525 
2526 		list_for_each_entry(sc, &channel->sc_list, sc_list) {
2527 			pr_err("Sub-channel not deleted!\n");
2528 			WARN_ON_ONCE(1);
2529 		}
2530 
2531 		atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
2532 	}
2533 
2534 	mutex_unlock(&vmbus_connection.channel_mutex);
2535 
2536 	vmbus_initiate_unload(false);
2537 
2538 	/* Reset the event for the next resume. */
2539 	reinit_completion(&vmbus_connection.ready_for_resume_event);
2540 
2541 	return 0;
2542 }
2543 
2544 static int vmbus_bus_resume(struct device *dev)
2545 {
2546 	struct vmbus_channel_msginfo *msginfo;
2547 	size_t msgsize;
2548 	int ret;
2549 
2550 	/*
2551 	 * Re-negotiate with the host using only the 'vmbus_proto_version'
2552 	 * that was in use before hibernation.
2553 	 */
2554 	if (!vmbus_proto_version) {
2555 		pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
2556 		return -EINVAL;
2557 	}
2558 
2559 	msgsize = sizeof(*msginfo) +
2560 		  sizeof(struct vmbus_channel_initiate_contact);
2561 
2562 	msginfo = kzalloc(msgsize, GFP_KERNEL);
2563 
2564 	if (msginfo == NULL)
2565 		return -ENOMEM;
2566 
2567 	ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);
2568 
2569 	kfree(msginfo);
2570 
2571 	if (ret != 0)
2572 		return ret;
2573 
2574 	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
2575 
2576 	vmbus_request_offers();
2577 
2578 	if (wait_for_completion_timeout(
2579 		&vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
2580 		pr_err("Some vmbus devices may be missing after suspend\n");
2581 
2582 	/* Reset the event for the next suspend. */
2583 	reinit_completion(&vmbus_connection.ready_for_suspend_event);
2584 
2585 	return 0;
2586 }
2587 #else
2588 #define vmbus_bus_suspend NULL
2589 #define vmbus_bus_resume NULL
2590 #endif /* CONFIG_PM_SLEEP */
2591 
2592 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
2593 	{"VMBUS", 0},
2594 	{"VMBus", 0},
2595 	{"", 0},
2596 };
2597 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
2598 
2599 /*
2600  * Note: we must use the "noirq" ops; otherwise hibernation cannot work with
2601  * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
2602  * the resume path, the PCI "noirq" restore op runs before the "non-noirq" ops
2603  * (see resume_target_kernel() -> dpm_resume_start(), and hibernation_restore()
2604  * -> dpm_resume_end()). This means vmbus_bus_resume() and pci-hyperv's
2605  * resume callback must also run via the "noirq" ops.
2606  *
2607  * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
2608  * earlier in this file before vmbus_pm.
2609  */
2610 
2611 static const struct dev_pm_ops vmbus_bus_pm = {
2612 	.suspend_noirq	= NULL,
2613 	.resume_noirq	= NULL,
2614 	.freeze_noirq	= vmbus_bus_suspend,
2615 	.thaw_noirq	= vmbus_bus_resume,
2616 	.poweroff_noirq	= vmbus_bus_suspend,
2617 	.restore_noirq	= vmbus_bus_resume
2618 };
2619 
2620 static struct acpi_driver vmbus_acpi_driver = {
2621 	.name = "vmbus",
2622 	.ids = vmbus_acpi_device_ids,
2623 	.ops = {
2624 		.add = vmbus_acpi_add,
2625 		.remove = vmbus_acpi_remove,
2626 	},
2627 	.drv.pm = &vmbus_bus_pm,
2628 };
2629 
2630 static void hv_kexec_handler(void)
2631 {
2632 	hv_stimer_global_cleanup();
2633 	vmbus_initiate_unload(false);
2634 	/* Make sure conn_state is set as hv_synic_cleanup checks for it */
2635 	mb();
2636 	cpuhp_remove_state(hyperv_cpuhp_online);
2637 }
2638 
2639 static void hv_crash_handler(struct pt_regs *regs)
2640 {
2641 	int cpu;
2642 
2643 	vmbus_initiate_unload(true);
2644 	/*
2645 	 * In the crash handler we can't schedule synic cleanup for all CPUs,
2646 	 * so do the cleanup for the current CPU only. This should be
2647 	 * sufficient for kdump.
2648 	 */
2649 	cpu = smp_processor_id();
2650 	hv_stimer_cleanup(cpu);
2651 	hv_synic_disable_regs(cpu);
2652 }
2653 
2654 static int hv_synic_suspend(void)
2655 {
2656 	/*
2657 	 * When we reach here, all the non-boot CPUs have been offlined.
2658 	 * If we're in a legacy configuration where stimer Direct Mode is
2659 	 * not enabled, the stimers on the non-boot CPUs have been unbound
2660 	 * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
2661 	 * hv_stimer_cleanup() -> clockevents_unbind_device().
2662 	 *
2663 	 * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
2664 	 * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
2665 	 * 1) it's unnecessary as interrupts remain disabled between
2666 	 * syscore_suspend() and syscore_resume(): see create_image() and
2667 	 * resume_target_kernel()
2668 	 * 2) the stimer on CPU0 is automatically disabled later by
2669 	 * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
2670 	 * -> clockevents_shutdown() -> ... -> hv_ce_shutdown()
2671 	 * 3) a warning would be triggered if we call
2672 	 * clockevents_unbind_device(), which may sleep, in an
2673 	 * interrupts-disabled context.
2674 	 */
2675 
2676 	hv_synic_disable_regs(0);
2677 
2678 	return 0;
2679 }
2680 
2681 static void hv_synic_resume(void)
2682 {
2683 	hv_synic_enable_regs(0);
2684 
2685 	/*
2686 	 * Note: we don't need to call hv_stimer_init(0), because the timer
2687 	 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
2688 	 * automatically re-enabled in timekeeping_resume().
2689 	 */
2690 }
2691 
2692 /* The callbacks run only on CPU0, with irqs_disabled. */
2693 static struct syscore_ops hv_synic_syscore_ops = {
2694 	.suspend = hv_synic_suspend,
2695 	.resume = hv_synic_resume,
2696 };
2697 
2698 static int __init hv_acpi_init(void)
2699 {
2700 	int ret, t;
2701 
2702 	if (!hv_is_hyperv_initialized())
2703 		return -ENODEV;
2704 
2705 	if (hv_root_partition)
2706 		return 0;
2707 
2708 	init_completion(&probe_event);
2709 
2710 	/*
2711 	 * Get ACPI resources first.
2712 	 */
2713 	ret = acpi_bus_register_driver(&vmbus_acpi_driver);
2714 
2715 	if (ret)
2716 		return ret;
2717 
2718 	t = wait_for_completion_timeout(&probe_event, 5*HZ);
2719 	if (t == 0) {
2720 		ret = -ETIMEDOUT;
2721 		goto cleanup;
2722 	}
2723 
2724 	/*
2725 	 * If we're on an architecture with a hardcoded hypervisor
2726 	 * vector (i.e. x86/x64), override the VMbus interrupt found
2727 	 * in the ACPI tables. Ensure vmbus_irq is not set since the
2728 	 * normal Linux IRQ mechanism is not used in this case.
2729 	 */
2730 #ifdef HYPERVISOR_CALLBACK_VECTOR
2731 	vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
2732 	vmbus_irq = -1;
2733 #endif
2734 
2735 	hv_debug_init();
2736 
2737 	ret = vmbus_bus_init();
2738 	if (ret)
2739 		goto cleanup;
2740 
2741 	hv_setup_kexec_handler(hv_kexec_handler);
2742 	hv_setup_crash_handler(hv_crash_handler);
2743 
2744 	register_syscore_ops(&hv_synic_syscore_ops);
2745 
2746 	return 0;
2747 
2748 cleanup:
2749 	acpi_bus_unregister_driver(&vmbus_acpi_driver);
2750 	hv_acpi_dev = NULL;
2751 	return ret;
2752 }
2753 
2754 static void __exit vmbus_exit(void)
2755 {
2756 	int cpu;
2757 
2758 	unregister_syscore_ops(&hv_synic_syscore_ops);
2759 
2760 	hv_remove_kexec_handler();
2761 	hv_remove_crash_handler();
2762 	vmbus_connection.conn_state = DISCONNECTED;
2763 	hv_stimer_global_cleanup();
2764 	vmbus_disconnect();
2765 	if (vmbus_irq == -1) {
2766 		hv_remove_vmbus_handler();
2767 	} else {
2768 		free_percpu_irq(vmbus_irq, vmbus_evt);
2769 		free_percpu(vmbus_evt);
2770 	}
2771 	for_each_online_cpu(cpu) {
2772 		struct hv_per_cpu_context *hv_cpu
2773 			= per_cpu_ptr(hv_context.cpu_context, cpu);
2774 
2775 		tasklet_kill(&hv_cpu->msg_dpc);
2776 	}
2777 	hv_debug_rm_all_dir();
2778 
2779 	vmbus_free_channels();
2780 	kfree(vmbus_connection.channels);
2781 
2782 	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
2783 		kmsg_dump_unregister(&hv_kmsg_dumper);
2784 		unregister_die_notifier(&hyperv_die_block);
2785 	}
2786 
2787 	/*
2788 	 * The panic notifier is always registered, hence we
2789 	 * unconditionally unregister it here.
2790 	 */
2791 	atomic_notifier_chain_unregister(&panic_notifier_list,
2792 					 &hyperv_panic_block);
2793 
2794 	free_page((unsigned long)hv_panic_page);
2795 	unregister_sysctl_table(hv_ctl_table_hdr);
2796 	hv_ctl_table_hdr = NULL;
2797 	bus_unregister(&hv_bus);
2798 
2799 	cpuhp_remove_state(hyperv_cpuhp_online);
2800 	hv_synic_free();
2801 	acpi_bus_unregister_driver(&vmbus_acpi_driver);
2802 }
2803 
2804 
2805 MODULE_LICENSE("GPL");
2806 MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");
2807 
2808 subsys_initcall(hv_acpi_init);
2809 module_exit(vmbus_exit);
2810