/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include "hyperv_vmbus.h"

struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

static struct acpi_device  *hv_acpi_dev;

static struct completion probe_event;

static int hyperv_cpuhp_online;

static void hyperv_report_panic(struct pt_regs *regs)
{
	static bool panic_reported;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	regs = current_pt_regs();

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = (struct die_args *)args;
	struct pt_regs *regs = die->regs;

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_SEMAPHORE(hyperv_mmio_lock);

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}
#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;
	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}

static u8 channel_monitor_group(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}
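
/*
 * Worked example of the group/offset math above (illustrative comment, not
 * part of the original file): a channel whose offermsg.monitorid is 77 maps
 * to trigger group 77 / 32 == 2 and bit offset 77 % 32 == 13, i.e. its
 * monitor bit is bit 13 of trigger_group[2] in the monitor page.
 */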

static u32 channel_pending(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(hv_dev, alias_name);
	return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
		channel->offermsg.child_relid, channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				     buf_size - tot_written,
				     "%u:%u\n",
				     cur_sc->offermsg.child_relid,
				     cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_dev);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in userspace. Udev will then look at its rules
 * and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	int ret;
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(dev, alias_name);
	ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
	return ret;
}
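
/*
 * Illustrative example (not part of the original file): the GUID bytes are
 * emitted in their stored little-endian order, so a device with class GUID
 * f8615163-df3e-46c5-913f-f2d2f965ed0e would produce
 * "MODALIAS=vmbus:635161f83edfc546913ff2d2f965ed0e".
 */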

static const uuid_le null_guid;

static inline bool is_null_guid(const uuid_le *guid)
{
	if (uuid_le_cmp(*guid, null_guid))
		return false;
	return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
					const uuid_le *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	/* Look at the dynamic ids first, before the static ones */
	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (!uuid_le_cmp(dynid->id.guid, *guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	if (id)
		return id;

	id = drv->id_table;
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !is_null_guid(&id->guid); id++)
		if (!uuid_le_cmp(id->guid, *guid))
			return id;

	return NULL;
}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/* Parse string of form: 1b4e28ba-2fa1-11d2-883f-b9a761bde3fb */
static int get_uuid_le(const char *str, uuid_le *uu)
{
	unsigned int b[16];
	int i;

	if (strlen(str) < 37)
		return -1;

	for (i = 0; i < 36; i++) {
		switch (i) {
		case 8: case 13: case 18: case 23:
			if (str[i] != '-')
				return -1;
			break;
		default:
			if (!isxdigit(str[i]))
				return -1;
		}
	}

	/* unparse little endian output byte order */
	if (sscanf(str,
		   "%2x%2x%2x%2x-%2x%2x-%2x%2x-%2x%2x-%2x%2x%2x%2x%2x%2x",
		   &b[3], &b[2], &b[1], &b[0],
		   &b[5], &b[4], &b[7], &b[6], &b[8], &b[9],
		   &b[10], &b[11], &b[12], &b[13], &b[14], &b[15]) != 16)
		return -1;

	for (i = 0; i < 16; i++)
		uu->b[i] = b[i];
	return 0;
}

/*
 * store_new_id - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	uuid_le guid = NULL_UUID_LE;
	ssize_t retval;

	if (get_uuid_le(buf, &guid) != 0)
		return -EINVAL;

	if (hv_vmbus_get_id(drv, &guid))
		return -EEXIST;

	retval = vmbus_add_dynid(drv, &guid);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);

/*
 * store_remove_id - remove a vmbus device ID from this driver
 *
 * Removes a dynamic vmbus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct vmbus_dynid *dynid, *n;
	uuid_le guid = NULL_UUID_LE;
	ssize_t retval = -ENODEV;

	if (get_uuid_le(buf, &guid))
		return -EINVAL;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		struct hv_vmbus_device_id *id = &dynid->id;

		if (!uuid_le_cmp(id->guid, guid)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);
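
/*
 * Illustrative usage of the two attributes above (a sketch, not part of the
 * original file; the driver name is just an example). Writing a GUID in the
 * text form parsed by get_uuid_le() adds a dynamic ID and re-runs matching
 * via driver_attach(); writing the same GUID to remove_id drops it again:
 *
 *	echo "1b4e28ba-2fa1-11d2-883f-b9a761bde3fb" \
 *		> /sys/bus/vmbus/drivers/uio_hv_generic/new_id
 */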

static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);

/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, &hv_dev->dev_type))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, &dev->dev_type);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);

	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_process_channel_removal(channel,
				   channel->offermsg.child_relid);
	kfree(hv_dev);
}

/* The one and only one */
static struct bus_type  hv_bus = {
	.name =		"vmbus",
	.match =		vmbus_match,
	.shutdown =		vmbus_shutdown,
	.remove =		vmbus_remove,
	.probe =		vmbus_probe,
	.uevent =		vmbus_uevent,
	.dev_groups =		vmbus_dev_groups,
	.drv_groups =		vmbus_drv_groups,
};

struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage(&ctx->msg);
	kfree(ctx);
}

static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
{
	struct clock_event_device *dev = hv_context.clk_evt[cpu];

	if (dev->event_handler)
		dev->event_handler(dev);

	vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
}

void vmbus_on_msg_dpc(unsigned long data)
{
	int cpu = smp_processor_id();
	void *page_addr = hv_context.synic_message_page[cpu];
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	u32 message_type = msg->header.message_type;

	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
		goto msg_handled;
	}

	entry = &channel_message_table[hdr->msgtype];
	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, msg, sizeof(*msg));

		queue_work(vmbus_connection.work_queue, &ctx->work);
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}

static void vmbus_isr(void)
{
	int cpu = smp_processor_id();
	void *page_addr;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	page_addr = hv_context.synic_event_page[cpu];
	if (page_addr == NULL)
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
		(vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0,
			(unsigned long *) &event->flags32[0])) {
			handled = true;
		}
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		tasklet_schedule(hv_context.event_dpc[cpu]);

	page_addr = hv_context.synic_message_page[cpu];
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
			hv_process_timer_expiration(msg, cpu);
		else
			tasklet_schedule(hv_context.msg_dpc[cpu]);
	}

	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	/* Hypervisor initialization...setup hypercall page..etc */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		goto err_cleanup;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;
	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv:online",
				hv_synic_init, hv_synic_cleanup);
	if (ret < 0)
		goto err_alloc;
	hyperv_cpuhp_online = ret;

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		register_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_block);
	}

	vmbus_request_offers();

	return 0;

err_connect:
	cpuhp_remove_state(hyperv_cpuhp_online);
err_alloc:
	hv_synic_free();
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);

err_cleanup:
	hv_cleanup(false);

	return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the driver
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the Hyper-V vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
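
/*
 * Minimal registration sketch (illustrative, not part of this file). Callers
 * normally use the vmbus_driver_register() wrapper macro from
 * <linux/hyperv.h>, which supplies THIS_MODULE and KBUILD_MODNAME. The
 * "example" names and callbacks below are placeholders:
 *
 *	static const struct hv_vmbus_device_id example_id_table[] = {
 *		{ HV_NIC_GUID, },	// device class to bind to
 *		{ },			// terminating null-GUID entry
 *	};
 *
 *	static struct hv_driver example_drv = {
 *		.name = "example",
 *		.id_table = example_id_table,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return vmbus_driver_register(&example_drv);
 *	}
 */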

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);

/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	int ret = 0;

	dev_set_name(&child_device_obj->device, "%pUl",
		     child_device_obj->channel->offermsg.offer.if_instance.b);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);

	if (ret)
		pr_err("Unable to register child device\n");
	else
		pr_debug("child device %s registered\n",
			dev_name(&child_device_obj->device));

	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		dev_name(&device_obj->device));

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}

/*
 * VMBus is an ACPI-enumerated device. Get the information we
 * need from the DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}

static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}

static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB.  The length seems to
	 * be underreported, particularly in a Generation 1 VM.  So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */

	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}

/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplied a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	down(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region.  Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {

		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	up(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
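
/*
 * Illustrative call (a sketch, not from this file): allocate a page-sized,
 * page-aligned window anywhere in guest physical address space for a device
 * "hdev", then release it again with vmbus_free_mmio():
 *
 *	struct resource *mmio;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&mmio, hdev, 0, -1,
 *				  PAGE_SIZE, PAGE_SIZE, false);
 *	if (ret)
 *		return ret;
 *	...
 *	vmbus_free_mmio(mmio->start, resource_size(mmio));
 */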

/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	down(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	up(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);

/**
 * vmbus_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls and such that talk about specific
 * processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
int vmbus_cpu_number_to_vp_number(int cpu_number)
{
	return hv_context.vp_index[cpu_number];
}
EXPORT_SYMBOL_GPL(vmbus_cpu_number_to_vp_number);

static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
					vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
};

static void hv_kexec_handler(void)
{
	hv_synic_clockevents_cleanup();
	vmbus_initiate_unload(false);
	vmbus_connection.conn_state = DISCONNECTED;
	/* Make sure conn_state is set as hv_synic_cleanup checks for it */
	mb();
	cpuhp_remove_state(hyperv_cpuhp_online);
	hv_cleanup(false);
}

static void hv_crash_handler(struct pt_regs *regs)
{
	vmbus_initiate_unload(true);
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_cleanup(smp_processor_id());
	hv_cleanup(true);
}

static int __init hv_acpi_init(void)
{
	int ret, t;

	if (x86_hyper != &x86_hyper_ms_hyperv)
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}

static void __exit vmbus_exit(void)
{
	int cpu;

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_clockevents_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu)
		tasklet_kill(hv_context.msg_dpc[cpu]);
	vmbus_free_channels();
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}
	bus_unregister(&hv_bus);
	hv_cleanup(false);
	for_each_online_cpu(cpu) {
		tasklet_kill(hv_context.event_dpc[cpu]);
	}
	cpuhp_remove_state(hyperv_cpuhp_online);
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
}

MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);