xref: /openbmc/linux/drivers/hv/channel_mgmt.c (revision 31e67366)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2009, Microsoft Corporation.
4  *
5  * Authors:
6  *   Haiyang Zhang <haiyangz@microsoft.com>
7  *   Hank Janssen  <hjanssen@microsoft.com>
8  */
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/kernel.h>
12 #include <linux/interrupt.h>
13 #include <linux/sched.h>
14 #include <linux/wait.h>
15 #include <linux/mm.h>
16 #include <linux/slab.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/completion.h>
20 #include <linux/delay.h>
21 #include <linux/cpu.h>
22 #include <linux/hyperv.h>
23 #include <asm/mshyperv.h>
24 
25 #include "hyperv_vmbus.h"
26 
27 static void init_vp_index(struct vmbus_channel *channel);
28 
29 const struct vmbus_device vmbus_devs[] = {
30 	/* IDE */
31 	{ .dev_type = HV_IDE,
32 	  HV_IDE_GUID,
33 	  .perf_device = true,
34 	  .allowed_in_isolated = false,
35 	},
36 
37 	/* SCSI */
38 	{ .dev_type = HV_SCSI,
39 	  HV_SCSI_GUID,
40 	  .perf_device = true,
41 	  .allowed_in_isolated = true,
42 	},
43 
44 	/* Fibre Channel */
45 	{ .dev_type = HV_FC,
46 	  HV_SYNTHFC_GUID,
47 	  .perf_device = true,
48 	  .allowed_in_isolated = false,
49 	},
50 
51 	/* Synthetic NIC */
52 	{ .dev_type = HV_NIC,
53 	  HV_NIC_GUID,
54 	  .perf_device = true,
55 	  .allowed_in_isolated = true,
56 	},
57 
58 	/* Network Direct */
59 	{ .dev_type = HV_ND,
60 	  HV_ND_GUID,
61 	  .perf_device = true,
62 	  .allowed_in_isolated = false,
63 	},
64 
65 	/* PCIE */
66 	{ .dev_type = HV_PCIE,
67 	  HV_PCIE_GUID,
68 	  .perf_device = false,
69 	  .allowed_in_isolated = false,
70 	},
71 
72 	/* Synthetic Frame Buffer */
73 	{ .dev_type = HV_FB,
74 	  HV_SYNTHVID_GUID,
75 	  .perf_device = false,
76 	  .allowed_in_isolated = false,
77 	},
78 
79 	/* Synthetic Keyboard */
80 	{ .dev_type = HV_KBD,
81 	  HV_KBD_GUID,
82 	  .perf_device = false,
83 	  .allowed_in_isolated = false,
84 	},
85 
86 	/* Synthetic MOUSE */
87 	{ .dev_type = HV_MOUSE,
88 	  HV_MOUSE_GUID,
89 	  .perf_device = false,
90 	  .allowed_in_isolated = false,
91 	},
92 
93 	/* KVP */
94 	{ .dev_type = HV_KVP,
95 	  HV_KVP_GUID,
96 	  .perf_device = false,
97 	  .allowed_in_isolated = false,
98 	},
99 
100 	/* Time Synch */
101 	{ .dev_type = HV_TS,
102 	  HV_TS_GUID,
103 	  .perf_device = false,
104 	  .allowed_in_isolated = true,
105 	},
106 
107 	/* Heartbeat */
108 	{ .dev_type = HV_HB,
109 	  HV_HEART_BEAT_GUID,
110 	  .perf_device = false,
111 	  .allowed_in_isolated = true,
112 	},
113 
114 	/* Shutdown */
115 	{ .dev_type = HV_SHUTDOWN,
116 	  HV_SHUTDOWN_GUID,
117 	  .perf_device = false,
118 	  .allowed_in_isolated = true,
119 	},
120 
121 	/* File copy */
122 	{ .dev_type = HV_FCOPY,
123 	  HV_FCOPY_GUID,
124 	  .perf_device = false,
125 	  .allowed_in_isolated = false,
126 	},
127 
128 	/* Backup */
129 	{ .dev_type = HV_BACKUP,
130 	  HV_VSS_GUID,
131 	  .perf_device = false,
132 	  .allowed_in_isolated = false,
133 	},
134 
135 	/* Dynamic Memory */
136 	{ .dev_type = HV_DM,
137 	  HV_DM_GUID,
138 	  .perf_device = false,
139 	  .allowed_in_isolated = false,
140 	},
141 
142 	/* Unknown GUID */
143 	{ .dev_type = HV_UNKNOWN,
144 	  .perf_device = false,
145 	  .allowed_in_isolated = false,
146 	},
147 };
148 
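/*
 * Device types offered by the host that the guest does not support;
 * hv_get_dev_type() maps offers with these GUIDs to HV_UNKNOWN.
 */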
149 static const struct {
150 	guid_t guid;
151 } vmbus_unsupported_devs[] = {
152 	{ HV_AVMA1_GUID },
153 	{ HV_AVMA2_GUID },
154 	{ HV_RDV_GUID	},
155 };
156 
157 /*
158  * The rescinded channel may be blocked waiting for a response from the host;
159  * take care of that.
160  */
161 static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
162 {
163 	struct vmbus_channel_msginfo *msginfo;
164 	unsigned long flags;
165 
166 
167 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
168 	channel->rescind = true;
169 	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
170 				msglistentry) {
171 
172 		if (msginfo->waiting_channel == channel) {
173 			complete(&msginfo->waitevent);
174 			break;
175 		}
176 	}
177 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
178 }
179 
180 static bool is_unsupported_vmbus_devs(const guid_t *guid)
181 {
182 	int i;
183 
184 	for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
185 		if (guid_equal(guid, &vmbus_unsupported_devs[i].guid))
186 			return true;
187 	return false;
188 }
189 
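/*
 * hv_get_dev_type - Map a channel's interface GUID to its index (HV_*) in
 * vmbus_devs[].  hv_sock channels and unsupported/unknown GUIDs map to
 * HV_UNKNOWN.
 */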
190 static u16 hv_get_dev_type(const struct vmbus_channel *channel)
191 {
192 	const guid_t *guid = &channel->offermsg.offer.if_type;
193 	u16 i;
194 
195 	if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
196 		return HV_UNKNOWN;
197 
198 	for (i = HV_IDE; i < HV_UNKNOWN; i++) {
199 		if (guid_equal(guid, &vmbus_devs[i].guid))
200 			return i;
201 	}
202 	pr_info("Unknown GUID: %pUl\n", guid);
203 	return i;
204 }
205 
206 /**
207  * vmbus_prep_negotiate_resp() - Create default response for Negotiate message
208  * @icmsghdrp: Pointer to msg header structure
209  * @buf: Raw buffer channel data
210  * @buflen: Length of the raw buffer channel data.
211  * @fw_version: The framework versions we can support.
212  * @fw_vercnt: The size of @fw_version.
213  * @srv_version: The service versions we can support.
214  * @srv_vercnt: The size of @srv_version.
215  * @nego_fw_version: The selected framework version.
216  * @nego_srv_version: The selected service version.
217  *
218  * Note: Versions are given in decreasing order.
219  *
220  * Set up and fill in default negotiate response message.
221  * Mainly used by Hyper-V drivers.
222  */
223 bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
224 				u32 buflen, const int *fw_version, int fw_vercnt,
225 				const int *srv_version, int srv_vercnt,
226 				int *nego_fw_version, int *nego_srv_version)
227 {
228 	int icframe_major, icframe_minor;
229 	int icmsg_major, icmsg_minor;
230 	int fw_major, fw_minor;
231 	int srv_major, srv_minor;
232 	int i, j;
233 	bool found_match = false;
234 	struct icmsg_negotiate *negop;
235 
236 	/* Check that there's enough space for icframe_vercnt, icmsg_vercnt */
237 	if (buflen < ICMSG_HDR + offsetof(struct icmsg_negotiate, reserved)) {
238 		pr_err_ratelimited("Invalid icmsg negotiate\n");
239 		return false;
240 	}
241 
242 	icmsghdrp->icmsgsize = 0x10;
243 	negop = (struct icmsg_negotiate *)&buf[ICMSG_HDR];
244 
245 	icframe_major = negop->icframe_vercnt;
246 	icframe_minor = 0;
247 
248 	icmsg_major = negop->icmsg_vercnt;
249 	icmsg_minor = 0;
250 
251 	/* Validate negop packet */
252 	if (icframe_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
253 	    icmsg_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
254 	    ICMSG_NEGOTIATE_PKT_SIZE(icframe_major, icmsg_major) > buflen) {
255 		pr_err_ratelimited("Invalid icmsg negotiate - icframe_major: %u, icmsg_major: %u\n",
256 				   icframe_major, icmsg_major);
257 		goto fw_error;
258 	}
259 
260 	/*
261 	 * Select the framework version number we will
262 	 * support.
263 	 */
264 
265 	for (i = 0; i < fw_vercnt; i++) {
266 		fw_major = (fw_version[i] >> 16);
267 		fw_minor = (fw_version[i] & 0xFFFF);
268 
269 		for (j = 0; j < negop->icframe_vercnt; j++) {
270 			if ((negop->icversion_data[j].major == fw_major) &&
271 			    (negop->icversion_data[j].minor == fw_minor)) {
272 				icframe_major = negop->icversion_data[j].major;
273 				icframe_minor = negop->icversion_data[j].minor;
274 				found_match = true;
275 				break;
276 			}
277 		}
278 
279 		if (found_match)
280 			break;
281 	}
282 
283 	if (!found_match)
284 		goto fw_error;
285 
286 	found_match = false;
287 
288 	for (i = 0; i < srv_vercnt; i++) {
289 		srv_major = (srv_version[i] >> 16);
290 		srv_minor = (srv_version[i] & 0xFFFF);
291 
292 		for (j = negop->icframe_vercnt;
293 			(j < negop->icframe_vercnt + negop->icmsg_vercnt);
294 			j++) {
295 
296 			if ((negop->icversion_data[j].major == srv_major) &&
297 				(negop->icversion_data[j].minor == srv_minor)) {
298 
299 				icmsg_major = negop->icversion_data[j].major;
300 				icmsg_minor = negop->icversion_data[j].minor;
301 				found_match = true;
302 				break;
303 			}
304 		}
305 
306 		if (found_match)
307 			break;
308 	}
309 
310 	/*
311 	 * Respond with the framework and service
312 	 * version numbers we can support.
313 	 */
314 
315 fw_error:
316 	if (!found_match) {
317 		negop->icframe_vercnt = 0;
318 		negop->icmsg_vercnt = 0;
319 	} else {
320 		negop->icframe_vercnt = 1;
321 		negop->icmsg_vercnt = 1;
322 	}
323 
324 	if (nego_fw_version)
325 		*nego_fw_version = (icframe_major << 16) | icframe_minor;
326 
327 	if (nego_srv_version)
328 		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;
329 
330 	negop->icversion_data[0].major = icframe_major;
331 	negop->icversion_data[0].minor = icframe_minor;
332 	negop->icversion_data[1].major = icmsg_major;
333 	negop->icversion_data[1].minor = icmsg_minor;
334 	return found_match;
335 }
336 
337 EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
338 
339 /*
340  * alloc_channel - Allocate and initialize a vmbus channel object
341  */
342 static struct vmbus_channel *alloc_channel(void)
343 {
344 	struct vmbus_channel *channel;
345 
346 	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
347 	if (!channel)
348 		return NULL;
349 
350 	spin_lock_init(&channel->sched_lock);
351 	init_completion(&channel->rescind_event);
352 
353 	INIT_LIST_HEAD(&channel->sc_list);
354 
355 	tasklet_init(&channel->callback_event,
356 		     vmbus_on_event, (unsigned long)channel);
357 
358 	hv_ringbuffer_pre_init(channel);
359 
360 	return channel;
361 }
362 
363 /*
364  * free_channel - Release the resources used by the vmbus channel object
365  */
366 static void free_channel(struct vmbus_channel *channel)
367 {
368 	tasklet_kill(&channel->callback_event);
369 	vmbus_remove_channel_attr_group(channel);
370 
371 	kobject_put(&channel->kobj);
372 }
373 
374 void vmbus_channel_map_relid(struct vmbus_channel *channel)
375 {
376 	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
377 		return;
378 	/*
379 	 * The mapping of the channel's relid is visible from the CPUs that
380 	 * execute vmbus_chan_sched() by the time that vmbus_chan_sched() will
381 	 * execute:
382 	 *
383 	 *  (a) In the "normal (i.e., not resuming from hibernation)" path,
384 	 *      the full barrier in smp_store_mb() guarantees that the store
385 	 *      is propagated to all CPUs before the add_channel_work work
386 	 *      is queued.  In turn, add_channel_work is queued before the
387 	 *      channel's ring buffer is allocated/initialized and the
388 	 *      OPENCHANNEL message for the channel is sent in vmbus_open().
389 	 *      Hyper-V won't start sending the interrupts for the channel
390 	 *      before the OPENCHANNEL message is acked.  The memory barrier
391 	 *      in vmbus_chan_sched() -> sync_test_and_clear_bit() ensures
392 	 *      that vmbus_chan_sched() must find the channel's relid in
393 	 *      recv_int_page before retrieving the channel pointer from the
394 	 *      array of channels.
395 	 *
396 	 *  (b) In the "resuming from hibernation" path, the smp_store_mb()
397 	 *      guarantees that the store is propagated to all CPUs before
398 	 *      the VMBus connection is marked as ready for the resume event
399 	 *      (cf. check_ready_for_resume_event()).  The interrupt handler
400 	 *      of the VMBus driver and vmbus_chan_sched() cannot run before
401 	 *      vmbus_bus_resume() has completed execution (cf. resume_noirq).
402 	 */
403 	smp_store_mb(
404 		vmbus_connection.channels[channel->offermsg.child_relid],
405 		channel);
406 }
407 
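/*
 * vmbus_channel_unmap_relid - Clear the relid -> channel mapping that was
 * established by vmbus_channel_map_relid().
 */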
408 void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
409 {
410 	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
411 		return;
412 	WRITE_ONCE(
413 		vmbus_connection.channels[channel->offermsg.child_relid],
414 		NULL);
415 }
416 
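/*
 * vmbus_release_relid - Post a CHANNELMSG_RELID_RELEASED message to tell the
 * host that the guest is done with this relid.
 */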
417 static void vmbus_release_relid(u32 relid)
418 {
419 	struct vmbus_channel_relid_released msg;
420 	int ret;
421 
422 	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
423 	msg.child_relid = relid;
424 	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
425 	ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
426 			     true);
427 
428 	trace_vmbus_release_relid(&msg, ret);
429 }
430 
431 void hv_process_channel_removal(struct vmbus_channel *channel)
432 {
433 	lockdep_assert_held(&vmbus_connection.channel_mutex);
434 	BUG_ON(!channel->rescind);
435 
436 	/*
437 	 * hv_process_channel_removal() could find INVALID_RELID only for
438 	 * hv_sock channels.  See the inline comments in vmbus_onoffer().
439 	 */
440 	WARN_ON(channel->offermsg.child_relid == INVALID_RELID &&
441 		!is_hvsock_channel(channel));
442 
443 	/*
444 	 * Upon suspend, an in-use hv_sock channel is removed from the array of
445 	 * channels and the relid is invalidated.  After hibernation, when the
446 	 * user-space application destroys the channel, it's unnecessary and
447 	 * unsafe to remove the channel from the array of channels.  See also
448 	 * the inline comments before the call of vmbus_release_relid() below.
449 	 */
450 	if (channel->offermsg.child_relid != INVALID_RELID)
451 		vmbus_channel_unmap_relid(channel);
452 
453 	if (channel->primary_channel == NULL)
454 		list_del(&channel->listentry);
455 	else
456 		list_del(&channel->sc_list);
457 
458 	/*
459 	 * If this is a "perf" channel, update the hv_numa_map[] masks so that
460 	 * init_vp_index() can (re-)use the CPU.
461 	 */
462 	if (hv_is_perf_channel(channel))
463 		hv_clear_alloced_cpu(channel->target_cpu);
464 
465 	/*
466 	 * Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
467 	 * the relid is invalidated; after hibernation, when the user-space app
468 	 * destroys the channel, the relid is INVALID_RELID, and in this case
469 	 * it's unnecessary and unsafe to release the old relid, since the same
470 	 * relid can refer to a completely different channel now.
471 	 */
472 	if (channel->offermsg.child_relid != INVALID_RELID)
473 		vmbus_release_relid(channel->offermsg.child_relid);
474 
475 	free_channel(channel);
476 }
477 
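/*
 * vmbus_free_channels - Mark every channel as rescinded (as required by
 * hv_process_channel_removal()) and unregister its device.
 */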
478 void vmbus_free_channels(void)
479 {
480 	struct vmbus_channel *channel, *tmp;
481 
482 	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
483 		listentry) {
484 		/* hv_process_channel_removal() needs this */
485 		channel->rescind = true;
486 
487 		vmbus_device_unregister(channel->device_obj);
488 	}
489 }
490 
491 /* Note: the function can run concurrently for primary/sub channels. */
492 static void vmbus_add_channel_work(struct work_struct *work)
493 {
494 	struct vmbus_channel *newchannel =
495 		container_of(work, struct vmbus_channel, add_channel_work);
496 	struct vmbus_channel *primary_channel = newchannel->primary_channel;
497 	int ret;
498 
499 	/*
500 	 * This state is used to indicate a successful open
501 	 * so that when we do close the channel normally, we
502 	 * can clean up properly.
503 	 */
504 	newchannel->state = CHANNEL_OPEN_STATE;
505 
506 	if (primary_channel != NULL) {
507 		/* newchannel is a sub-channel. */
508 		struct hv_device *dev = primary_channel->device_obj;
509 
510 		if (vmbus_add_channel_kobj(dev, newchannel))
511 			goto err_deq_chan;
512 
513 		if (primary_channel->sc_creation_callback != NULL)
514 			primary_channel->sc_creation_callback(newchannel);
515 
516 		newchannel->probe_done = true;
517 		return;
518 	}
519 
520 	/*
521 	 * Start the process of binding the primary channel to the driver
522 	 */
523 	newchannel->device_obj = vmbus_device_create(
524 		&newchannel->offermsg.offer.if_type,
525 		&newchannel->offermsg.offer.if_instance,
526 		newchannel);
527 	if (!newchannel->device_obj)
528 		goto err_deq_chan;
529 
530 	newchannel->device_obj->device_id = newchannel->device_id;
531 	/*
532 	 * Add the new device to the bus. This will kick off device-driver
533 	 * binding which eventually invokes the device driver's AddDevice()
534 	 * method.
535 	 */
536 	ret = vmbus_device_register(newchannel->device_obj);
537 
538 	if (ret != 0) {
539 		pr_err("unable to add child device object (relid %d)\n",
540 			newchannel->offermsg.child_relid);
541 		kfree(newchannel->device_obj);
542 		goto err_deq_chan;
543 	}
544 
545 	newchannel->probe_done = true;
546 	return;
547 
548 err_deq_chan:
549 	mutex_lock(&vmbus_connection.channel_mutex);
550 
551 	/*
552 	 * We need to set the flag, otherwise
553 	 * vmbus_onoffer_rescind() can be blocked.
554 	 */
555 	newchannel->probe_done = true;
556 
557 	if (primary_channel == NULL)
558 		list_del(&newchannel->listentry);
559 	else
560 		list_del(&newchannel->sc_list);
561 
562 	/* vmbus_process_offer() has mapped the channel. */
563 	vmbus_channel_unmap_relid(newchannel);
564 
565 	mutex_unlock(&vmbus_connection.channel_mutex);
566 
567 	vmbus_release_relid(newchannel->offermsg.child_relid);
568 
569 	free_channel(newchannel);
570 }
571 
572 /*
573  * vmbus_process_offer - Process the offer by creating a channel/device
574  * associated with this offer
575  */
576 static void vmbus_process_offer(struct vmbus_channel *newchannel)
577 {
578 	struct vmbus_channel *channel;
579 	struct workqueue_struct *wq;
580 	bool fnew = true;
581 
582 	/*
583 	 * Synchronize vmbus_process_offer() and CPU hotplugging:
584 	 *
585 	 * CPU1				CPU2
586 	 *
587 	 * [vmbus_process_offer()]	[Hot removal of the CPU]
588 	 *
589 	 * CPU_READ_LOCK		CPUS_WRITE_LOCK
590 	 * LOAD cpu_online_mask		SEARCH chn_list
591 	 * STORE target_cpu		LOAD target_cpu
592 	 * INSERT chn_list		STORE cpu_online_mask
593 	 * CPUS_READ_UNLOCK		CPUS_WRITE_UNLOCK
594 	 *
595 	 * Forbids: CPU1's LOAD from *not* seeing CPU2's STORE &&
596 	 * 		CPU2's SEARCH from *not* seeing CPU1's INSERT
597 	 *
598 	 * Forbids: CPU2's SEARCH from seeing CPU1's INSERT &&
599 	 * 		CPU2's LOAD from *not* seeing CPU1's STORE
600 	 */
601 	cpus_read_lock();
602 
603 	/*
604 	 * Serializes modifications of the chn_list as well as
605 	 * the accesses to next_numa_node_id in init_vp_index().
606 	 */
607 	mutex_lock(&vmbus_connection.channel_mutex);
608 
609 	init_vp_index(newchannel);
610 
611 	/* Remember the channels that should be cleaned up upon suspend. */
612 	if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
613 		atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);
614 
615 	/*
616 	 * Now that we have acquired the channel_mutex,
617 	 * we can release the potentially racing rescind thread.
618 	 */
619 	atomic_dec(&vmbus_connection.offer_in_progress);
620 
621 	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
622 		if (guid_equal(&channel->offermsg.offer.if_type,
623 			       &newchannel->offermsg.offer.if_type) &&
624 		    guid_equal(&channel->offermsg.offer.if_instance,
625 			       &newchannel->offermsg.offer.if_instance)) {
626 			fnew = false;
627 			break;
628 		}
629 	}
630 
631 	if (fnew) {
632 		list_add_tail(&newchannel->listentry,
633 			      &vmbus_connection.chn_list);
634 	} else {
635 		/*
636 		 * Check to see if this is a valid sub-channel.
637 		 */
638 		if (newchannel->offermsg.offer.sub_channel_index == 0) {
639 			mutex_unlock(&vmbus_connection.channel_mutex);
640 			/*
641 			 * Don't call free_channel(), because newchannel->kobj
642 			 * is not initialized yet.
643 			 */
644 			kfree(newchannel);
645 			WARN_ON_ONCE(1);
646 			return;
647 		}
648 		/*
649 		 * Process the sub-channel.
650 		 */
651 		newchannel->primary_channel = channel;
652 		list_add_tail(&newchannel->sc_list, &channel->sc_list);
653 	}
654 
655 	vmbus_channel_map_relid(newchannel);
656 
657 	mutex_unlock(&vmbus_connection.channel_mutex);
658 	cpus_read_unlock();
659 
660 	/*
661 	 * vmbus_process_offer() mustn't call channel->sc_creation_callback()
662 	 * directly for sub-channels, because sc_creation_callback() ->
663 	 * vmbus_open() may never get the host's response to the
664 	 * OPEN_CHANNEL message (the host may rescind a channel at any time,
665 	 * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
666 	 * may not wake up vmbus_open() as it's blocked due to a non-zero
667 	 * vmbus_connection.offer_in_progress, and finally we have a deadlock.
668 	 *
669 	 * The above is also true for primary channels, if the related device
670 	 * drivers use sync probing mode by default.
671 	 *
672 	 * And, usually the handling of primary channels and sub-channels can
673 	 * depend on each other, so we should offload them to different
674 	 * workqueues to avoid possible deadlock, e.g. in sync-probing mode,
675 	 * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
676 	 * rtnl_lock(), causing a deadlock: the former gets the rtnl_lock
677 	 * and waits for all the sub-channels to appear, but the latter
678 	 * can't get the rtnl_lock and this blocks the handling of
679 	 * sub-channels.
680 	 */
681 	INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
682 	wq = fnew ? vmbus_connection.handle_primary_chan_wq :
683 		    vmbus_connection.handle_sub_chan_wq;
684 	queue_work(wq, &newchannel->add_channel_work);
685 }
686 
687 /*
688  * We use this state to statically distribute the channel interrupt load.
689  */
690 static int next_numa_node_id;
691 
692 /*
693  * Starting with Win8, we can statically distribute the incoming
694  * channel interrupt load by binding a channel to VCPU.
695  *
696  * For pre-win8 hosts or non-performance critical channels we assign the
697  * VMBUS_CONNECT_CPU.
698  *
699  * Starting with win8, performance critical channels will be distributed
700  * evenly among all the available NUMA nodes.  Once the node is assigned,
701  * we will assign the CPU based on a simple round robin scheme.
702  */
703 static void init_vp_index(struct vmbus_channel *channel)
704 {
705 	bool perf_chn = hv_is_perf_channel(channel);
706 	cpumask_var_t available_mask;
707 	struct cpumask *alloced_mask;
708 	u32 target_cpu;
709 	int numa_node;
710 
711 	if ((vmbus_proto_version == VERSION_WS2008) ||
712 	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
713 	    !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
714 		/*
715 		 * Prior to win8, all channel interrupts are
716 		 * delivered on VMBUS_CONNECT_CPU.
717 		 * Also if the channel is not a performance critical
718 		 * channel, bind it to VMBUS_CONNECT_CPU.
719 		 * In case alloc_cpumask_var() fails, bind it to
720 		 * VMBUS_CONNECT_CPU.
721 		 */
722 		channel->target_cpu = VMBUS_CONNECT_CPU;
723 		if (perf_chn)
724 			hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
725 		return;
726 	}
727 
728 	while (true) {
729 		numa_node = next_numa_node_id++;
730 		if (numa_node == nr_node_ids) {
731 			next_numa_node_id = 0;
732 			continue;
733 		}
734 		if (cpumask_empty(cpumask_of_node(numa_node)))
735 			continue;
736 		break;
737 	}
738 	alloced_mask = &hv_context.hv_numa_map[numa_node];
739 
740 	if (cpumask_weight(alloced_mask) ==
741 	    cpumask_weight(cpumask_of_node(numa_node))) {
742 		/*
743 		 * We have cycled through all the CPUs in the node;
744 		 * reset the alloced map.
745 		 */
746 		cpumask_clear(alloced_mask);
747 	}
748 
749 	cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
750 
751 	target_cpu = cpumask_first(available_mask);
752 	cpumask_set_cpu(target_cpu, alloced_mask);
753 
754 	channel->target_cpu = target_cpu;
755 
756 	free_cpumask_var(available_mask);
757 }
758 
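/*
 * vmbus_wait_for_unload - Wait for CHANNELMSG_UNLOAD_RESPONSE on the crash
 * path, where we cannot sleep: poll the per-CPU SynIC message pages directly
 * in case the normal interrupt-driven path is no longer functional.
 */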
759 static void vmbus_wait_for_unload(void)
760 {
761 	int cpu;
762 	void *page_addr;
763 	struct hv_message *msg;
764 	struct vmbus_channel_message_header *hdr;
765 	u32 message_type, i;
766 
767 	/*
768 	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
769 	 * used for initial contact or to CPU0 depending on host version. When
770 	 * we're crashing on a different CPU, let's hope that the IRQ handler on
771 	 * the CPU which receives CHANNELMSG_UNLOAD_RESPONSE is still
772 	 * functional and vmbus_unload_response() will complete
773 	 * vmbus_connection.unload_event. If not, the last thing we can do is
774 	 * read message pages for all CPUs directly.
775 	 *
776 	 * Wait no more than 10 seconds so that the panic path can't get
777 	 * hung forever in case the response message isn't seen.
778 	 */
779 	for (i = 0; i < 1000; i++) {
780 		if (completion_done(&vmbus_connection.unload_event))
781 			break;
782 
783 		for_each_online_cpu(cpu) {
784 			struct hv_per_cpu_context *hv_cpu
785 				= per_cpu_ptr(hv_context.cpu_context, cpu);
786 
787 			page_addr = hv_cpu->synic_message_page;
788 			msg = (struct hv_message *)page_addr
789 				+ VMBUS_MESSAGE_SINT;
790 
791 			message_type = READ_ONCE(msg->header.message_type);
792 			if (message_type == HVMSG_NONE)
793 				continue;
794 
795 			hdr = (struct vmbus_channel_message_header *)
796 				msg->u.payload;
797 
798 			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
799 				complete(&vmbus_connection.unload_event);
800 
801 			vmbus_signal_eom(msg, message_type);
802 		}
803 
804 		mdelay(10);
805 	}
806 
807 	/*
808 	 * We're crashing and already got the UNLOAD_RESPONSE; clean up any
809 	 * pending messages on all CPUs so that we can receive new
810 	 * messages after we reconnect.
811 	 */
812 	for_each_online_cpu(cpu) {
813 		struct hv_per_cpu_context *hv_cpu
814 			= per_cpu_ptr(hv_context.cpu_context, cpu);
815 
816 		page_addr = hv_cpu->synic_message_page;
817 		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
818 		msg->header.message_type = HVMSG_NONE;
819 	}
820 }
821 
822 /*
823  * vmbus_unload_response - Handler for the unload response.
824  */
825 static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
826 {
827 	/*
828 	 * This is a global event; just wake up the waiting thread.
829 	 * Once we successfully unload, we can clean up the monitor state.
830 	 */
831 	complete(&vmbus_connection.unload_event);
832 }
833 
834 void vmbus_initiate_unload(bool crash)
835 {
836 	struct vmbus_channel_message_header hdr;
837 
838 	if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
839 		return;
840 
841 	/* Pre-Win2012R2 hosts don't support reconnect */
842 	if (vmbus_proto_version < VERSION_WIN8_1)
843 		return;
844 
845 	init_completion(&vmbus_connection.unload_event);
846 	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
847 	hdr.msgtype = CHANNELMSG_UNLOAD;
848 	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
849 		       !crash);
850 
851 	/*
852 	 * vmbus_initiate_unload() is also called on crash, and the crash can
853 	 * happen in interrupt context, where scheduling is impossible.
854 	 */
855 	if (!crash)
856 		wait_for_completion(&vmbus_connection.unload_event);
857 	else
858 		vmbus_wait_for_unload();
859 }
860 
861 static void check_ready_for_resume_event(void)
862 {
863 	/*
864 	 * If all the old primary channels have been fixed up, then it's safe
865 	 * to resume.
866 	 */
867 	if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
868 		complete(&vmbus_connection.ready_for_resume_event);
869 }
870 
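/*
 * vmbus_setup_channel_state - Initialize the channel from the offer message:
 * signalling parameters, monitor group/bit, device type, and a copy of the
 * offer itself.
 */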
871 static void vmbus_setup_channel_state(struct vmbus_channel *channel,
872 				      struct vmbus_channel_offer_channel *offer)
873 {
874 	/*
875 	 * Set up the state used for signalling the host.
876 	 */
877 	channel->sig_event = VMBUS_EVENT_CONNECTION_ID;
878 
879 	if (vmbus_proto_version != VERSION_WS2008) {
880 		channel->is_dedicated_interrupt =
881 				(offer->is_dedicated_interrupt != 0);
882 		channel->sig_event = offer->connection_id;
883 	}
884 
885 	memcpy(&channel->offermsg, offer,
886 	       sizeof(struct vmbus_channel_offer_channel));
887 	channel->monitor_grp = (u8)offer->monitorid / 32;
888 	channel->monitor_bit = (u8)offer->monitorid % 32;
889 	channel->device_id = hv_get_dev_type(channel);
890 }
891 
892 /*
893  * find_primary_channel_by_offer - Get the channel object given the new offer.
894  * This is only used in the resume path of hibernation.
895  */
896 static struct vmbus_channel *
897 find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
898 {
899 	struct vmbus_channel *channel = NULL, *iter;
900 	const guid_t *inst1, *inst2;
901 
902 	/* Ignore sub-channel offers. */
903 	if (offer->offer.sub_channel_index != 0)
904 		return NULL;
905 
906 	mutex_lock(&vmbus_connection.channel_mutex);
907 
908 	list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
909 		inst1 = &iter->offermsg.offer.if_instance;
910 		inst2 = &offer->offer.if_instance;
911 
912 		if (guid_equal(inst1, inst2)) {
913 			channel = iter;
914 			break;
915 		}
916 	}
917 
918 	mutex_unlock(&vmbus_connection.channel_mutex);
919 
920 	return channel;
921 }
922 
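/*
 * vmbus_is_valid_device - In an isolated VM, accept only the device types
 * marked allowed_in_isolated in vmbus_devs[]; when isolation is not
 * supported, every offer is accepted.
 */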
923 static bool vmbus_is_valid_device(const guid_t *guid)
924 {
925 	u16 i;
926 
927 	if (!hv_is_isolation_supported())
928 		return true;
929 
930 	for (i = 0; i < ARRAY_SIZE(vmbus_devs); i++) {
931 		if (guid_equal(guid, &vmbus_devs[i].guid))
932 			return vmbus_devs[i].allowed_in_isolated;
933 	}
934 	return false;
935 }
936 
937 /*
938  * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
939  *
940  */
941 static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
942 {
943 	struct vmbus_channel_offer_channel *offer;
944 	struct vmbus_channel *oldchannel, *newchannel;
945 	size_t offer_sz;
946 
947 	offer = (struct vmbus_channel_offer_channel *)hdr;
948 
949 	trace_vmbus_onoffer(offer);
950 
951 	if (!vmbus_is_valid_device(&offer->offer.if_type)) {
952 		pr_err_ratelimited("Invalid offer %d from the host supporting isolation\n",
953 				   offer->child_relid);
954 		atomic_dec(&vmbus_connection.offer_in_progress);
955 		return;
956 	}
957 
958 	oldchannel = find_primary_channel_by_offer(offer);
959 
960 	if (oldchannel != NULL) {
961 		/*
962 		 * We're resuming from hibernation: all the sub-channel and
963 		 * hv_sock channels we had before the hibernation should have
964 		 * been cleaned up, and now we must be seeing a re-offered
965 		 * primary channel that we had before the hibernation.
966 		 */
967 
968 		/*
969 		 * { Initially: channel relid = INVALID_RELID,
970 		 *		channels[valid_relid] = NULL }
971 		 *
972 		 * CPU1					CPU2
973 		 *
974 		 * [vmbus_onoffer()]			[vmbus_device_release()]
975 		 *
976 		 * LOCK channel_mutex			LOCK channel_mutex
977 		 * STORE channel relid = valid_relid	LOAD r1 = channel relid
978 		 * MAP_RELID channel			if (r1 != INVALID_RELID)
979 		 * UNLOCK channel_mutex			  UNMAP_RELID channel
980 		 *					UNLOCK channel_mutex
981 		 *
982 		 * Forbids: r1 == valid_relid &&
983 		 * 		channels[valid_relid] == channel
984 		 *
985 		 * Note.  r1 can be INVALID_RELID only for an hv_sock channel.
986 		 * None of the hv_sock channels which were present before the
987 		 * suspend are re-offered upon the resume.  See the WARN_ON()
988 		 * in hv_process_channel_removal().
989 		 */
990 		mutex_lock(&vmbus_connection.channel_mutex);
991 
992 		atomic_dec(&vmbus_connection.offer_in_progress);
993 
994 		WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
995 		/* Fix up the relid. */
996 		oldchannel->offermsg.child_relid = offer->child_relid;
997 
998 		offer_sz = sizeof(*offer);
999 		if (memcmp(offer, &oldchannel->offermsg, offer_sz) != 0) {
1000 			/*
1001 			 * This is not an error, since the host can also change
1002 			 * the other field(s) of the offer, e.g. on WS RS5
1003 			 * (Build 17763), the offer->connection_id of the
1004 			 * Mellanox VF vmbus device can change when the host
1005 			 * reoffers the device upon resume.
1006 			 */
1007 			pr_debug("vmbus offer changed: relid=%d\n",
1008 				 offer->child_relid);
1009 
1010 			print_hex_dump_debug("Old vmbus offer: ",
1011 					     DUMP_PREFIX_OFFSET, 16, 4,
1012 					     &oldchannel->offermsg, offer_sz,
1013 					     false);
1014 			print_hex_dump_debug("New vmbus offer: ",
1015 					     DUMP_PREFIX_OFFSET, 16, 4,
1016 					     offer, offer_sz, false);
1017 
1018 			/* Fix up the old channel. */
1019 			vmbus_setup_channel_state(oldchannel, offer);
1020 		}
1021 
1022 		/* Add the channel back to the array of channels. */
1023 		vmbus_channel_map_relid(oldchannel);
1024 		check_ready_for_resume_event();
1025 
1026 		mutex_unlock(&vmbus_connection.channel_mutex);
1027 		return;
1028 	}
1029 
1030 	/* Allocate the channel object and save this offer. */
1031 	newchannel = alloc_channel();
1032 	if (!newchannel) {
1033 		vmbus_release_relid(offer->child_relid);
1034 		atomic_dec(&vmbus_connection.offer_in_progress);
1035 		pr_err("Unable to allocate channel object\n");
1036 		return;
1037 	}
1038 
1039 	vmbus_setup_channel_state(newchannel, offer);
1040 
1041 	vmbus_process_offer(newchannel);
1042 }
1043 
1044 static void check_ready_for_suspend_event(void)
1045 {
1046 	/*
1047 	 * If all the sub-channels or hv_sock channels have been cleaned up,
1048 	 * then it's safe to suspend.
1049 	 */
1050 	if (atomic_dec_and_test(&vmbus_connection.nr_chan_close_on_suspend))
1051 		complete(&vmbus_connection.ready_for_suspend_event);
1052 }
1053 
1054 /*
1055  * vmbus_onoffer_rescind - Rescind offer handler.
1056  *
1057  * We queue a work item to process this offer synchronously
1058  * The host is rescinding a previously offered channel; find it and tear it down.
1059 static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
1060 {
1061 	struct vmbus_channel_rescind_offer *rescind;
1062 	struct vmbus_channel *channel;
1063 	struct device *dev;
1064 	bool clean_up_chan_for_suspend;
1065 
1066 	rescind = (struct vmbus_channel_rescind_offer *)hdr;
1067 
1068 	trace_vmbus_onoffer_rescind(rescind);
1069 
1070 	/*
1071 	 * The offer msg and the corresponding rescind msg
1072 	 * from the host are guaranteed to be ordered -
1073 	 * offer comes in first and then the rescind.
1074 	 * Since we process these events in work elements,
1075 	 * and with preemption, we may end up processing
1076 	 * the events out of order.  We rely on the synchronization
1077 	 * provided by offer_in_progress and by channel_mutex for
1078 	 * ordering these events:
1079 	 *
1080 	 * { Initially: offer_in_progress = 1 }
1081 	 *
1082 	 * CPU1				CPU2
1083 	 *
1084 	 * [vmbus_onoffer()]		[vmbus_onoffer_rescind()]
1085 	 *
1086 	 * LOCK channel_mutex		WAIT_ON offer_in_progress == 0
1087 	 * DECREMENT offer_in_progress	LOCK channel_mutex
1088 	 * STORE channels[]		LOAD channels[]
1089 	 * UNLOCK channel_mutex		UNLOCK channel_mutex
1090 	 *
1091 	 * Forbids: CPU2's LOAD from *not* seeing CPU1's STORE
1092 	 */
1093 
1094 	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
1095 		/*
1096 		 * We wait here while any channel offer is still
1097 		 * being processed.
1098 		 */
1099 		msleep(1);
1100 	}
1101 
1102 	mutex_lock(&vmbus_connection.channel_mutex);
1103 	channel = relid2channel(rescind->child_relid);
1104 	if (channel != NULL) {
1105 		/*
1106 		 * Guarantee that no other instance of vmbus_onoffer_rescind()
1107 		 * has got a reference to the channel object.  Synchronize on
1108 		 * &vmbus_connection.channel_mutex.
1109 		 */
1110 		if (channel->rescind_ref) {
1111 			mutex_unlock(&vmbus_connection.channel_mutex);
1112 			return;
1113 		}
1114 		channel->rescind_ref = true;
1115 	}
1116 	mutex_unlock(&vmbus_connection.channel_mutex);
1117 
1118 	if (channel == NULL) {
1119 		/*
1120 		 * We failed in processing the offer message;
1121 		 * we would have cleaned up the relid in that
1122 		 * failure path.
1123 		 */
1124 		return;
1125 	}
1126 
1127 	clean_up_chan_for_suspend = is_hvsock_channel(channel) ||
1128 				    is_sub_channel(channel);
1129 	/*
1130 	 * Before setting channel->rescind in vmbus_rescind_cleanup(), we
1131 	 * should make sure the channel callback is not running any more.
1132 	 */
1133 	vmbus_reset_channel_cb(channel);
1134 
1135 	/*
1136 	 * Now wait for offer handling to complete.
1137 	 */
1138 	vmbus_rescind_cleanup(channel);
1139 	while (READ_ONCE(channel->probe_done) == false) {
1140 		/*
1141 		 * We wait here until vmbus_add_channel_work() has
1142 		 * finished handling the offer and set probe_done.
1143 		 */
1144 		msleep(1);
1145 	}
1146 
1147 	/*
1148 	 * At this point, the rescind handling can proceed safely.
1149 	 */
1150 
1151 	if (channel->device_obj) {
1152 		if (channel->chn_rescind_callback) {
1153 			channel->chn_rescind_callback(channel);
1154 
1155 			if (clean_up_chan_for_suspend)
1156 				check_ready_for_suspend_event();
1157 
1158 			return;
1159 		}
1160 		/*
1161 		 * We will have to unregister this device from the
1162 		 * driver core.
1163 		 */
1164 		dev = get_device(&channel->device_obj->device);
1165 		if (dev) {
1166 			vmbus_device_unregister(channel->device_obj);
1167 			put_device(dev);
1168 		}
1169 	} else if (channel->primary_channel != NULL) {
1170 		/*
1171 		 * Sub-channel is being rescinded. Following is the channel
1172 		 * close sequence when initiated from the driver (refer to
1173 		 * vmbus_close() for details):
1174 		 * 1. Close all sub-channels first
1175 		 * 2. Then close the primary channel.
1176 		 */
1177 		mutex_lock(&vmbus_connection.channel_mutex);
1178 		if (channel->state == CHANNEL_OPEN_STATE) {
1179 			/*
1180 			 * The channel is currently not open;
1181 			 * it is safe for us to clean up the channel.
1182 			 */
1183 			hv_process_channel_removal(channel);
1184 		} else {
1185 			complete(&channel->rescind_event);
1186 		}
1187 		mutex_unlock(&vmbus_connection.channel_mutex);
1188 	}
1189 
1190 	/* The "channel" may have been freed. Do not access it any longer. */
1191 
1192 	if (clean_up_chan_for_suspend)
1193 		check_ready_for_suspend_event();
1194 }
1195 
1196 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
1197 {
1198 	BUG_ON(!is_hvsock_channel(channel));
1199 
1200 	/* We always get a rescind msg when a connection is closed. */
1201 	while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
1202 		msleep(1);
1203 
1204 	vmbus_device_unregister(channel->device_obj);
1205 }
1206 EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
1207 
1208 
1209 /*
1210  * vmbus_onoffers_delivered -
1211  * This is invoked when all offers have been delivered.
1212  *
1213  * Nothing to do here.
1214  */
1215 static void vmbus_onoffers_delivered(
1216 			struct vmbus_channel_message_header *hdr)
1217 {
1218 }
1219 
1220 /*
1221  * vmbus_onopen_result - Open result handler.
1222  *
1223  * This is invoked when we received a response to our channel open request.
1224  * Find the matching request, copy the response and signal the requesting
1225  * thread.
1226  */
1227 static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
1228 {
1229 	struct vmbus_channel_open_result *result;
1230 	struct vmbus_channel_msginfo *msginfo;
1231 	struct vmbus_channel_message_header *requestheader;
1232 	struct vmbus_channel_open_channel *openmsg;
1233 	unsigned long flags;
1234 
1235 	result = (struct vmbus_channel_open_result *)hdr;
1236 
1237 	trace_vmbus_onopen_result(result);
1238 
1239 	/*
1240 	 * Find the open msg, copy the result and signal/unblock the wait event
1241 	 */
1242 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1243 
1244 	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1245 				msglistentry) {
1246 		requestheader =
1247 			(struct vmbus_channel_message_header *)msginfo->msg;
1248 
1249 		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
1250 			openmsg =
1251 			(struct vmbus_channel_open_channel *)msginfo->msg;
1252 			if (openmsg->child_relid == result->child_relid &&
1253 			    openmsg->openid == result->openid) {
1254 				memcpy(&msginfo->response.open_result,
1255 				       result,
1256 				       sizeof(
1257 					struct vmbus_channel_open_result));
1258 				complete(&msginfo->waitevent);
1259 				break;
1260 			}
1261 		}
1262 	}
1263 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1264 }
1265 
1266 /*
1267  * vmbus_ongpadl_created - GPADL created handler.
1268  *
1269  * This is invoked when we received a response to our gpadl create request.
1270  * Find the matching request, copy the response and signal the requesting
1271  * thread.
1272  */
1273 static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
1274 {
1275 	struct vmbus_channel_gpadl_created *gpadlcreated;
1276 	struct vmbus_channel_msginfo *msginfo;
1277 	struct vmbus_channel_message_header *requestheader;
1278 	struct vmbus_channel_gpadl_header *gpadlheader;
1279 	unsigned long flags;
1280 
1281 	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
1282 
1283 	trace_vmbus_ongpadl_created(gpadlcreated);
1284 
1285 	/*
1286 	 * Find the establish msg, copy the result and signal/unblock the wait
1287 	 * event
1288 	 */
1289 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1290 
1291 	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1292 				msglistentry) {
1293 		requestheader =
1294 			(struct vmbus_channel_message_header *)msginfo->msg;
1295 
1296 		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
1297 			gpadlheader =
1298 			(struct vmbus_channel_gpadl_header *)requestheader;
1299 
1300 			if ((gpadlcreated->child_relid ==
1301 			     gpadlheader->child_relid) &&
1302 			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
1303 				memcpy(&msginfo->response.gpadl_created,
1304 				       gpadlcreated,
1305 				       sizeof(
1306 					struct vmbus_channel_gpadl_created));
1307 				complete(&msginfo->waitevent);
1308 				break;
1309 			}
1310 		}
1311 	}
1312 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1313 }
1314 
1315 /*
1316  * vmbus_ongpadl_torndown - GPADL torndown handler.
1317  *
1318  * This is invoked when we received a response to our gpadl teardown request.
1319  * Find the matching request, copy the response and signal the requesting
1320  * thread.
1321  */
1322 static void vmbus_ongpadl_torndown(
1323 			struct vmbus_channel_message_header *hdr)
1324 {
1325 	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
1326 	struct vmbus_channel_msginfo *msginfo;
1327 	struct vmbus_channel_message_header *requestheader;
1328 	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
1329 	unsigned long flags;
1330 
1331 	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;
1332 
1333 	trace_vmbus_ongpadl_torndown(gpadl_torndown);
1334 
1335 	/*
1336 	 * Find the open msg, copy the result and signal/unblock the wait event
1337 	 */
1338 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1339 
1340 	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1341 				msglistentry) {
1342 		requestheader =
1343 			(struct vmbus_channel_message_header *)msginfo->msg;
1344 
1345 		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
1346 			gpadl_teardown =
1347 			(struct vmbus_channel_gpadl_teardown *)requestheader;
1348 
1349 			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
1350 				memcpy(&msginfo->response.gpadl_torndown,
1351 				       gpadl_torndown,
1352 				       sizeof(
1353 					struct vmbus_channel_gpadl_torndown));
1354 				complete(&msginfo->waitevent);
1355 				break;
1356 			}
1357 		}
1358 	}
1359 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1360 }
1361 
1362 /*
1363  * vmbus_onversion_response - Version response handler
1364  *
1365  * This is invoked when we received a response to our initiate contact request.
1366  * Find the matching request, copy the response and signal the requesting
1367  * thread.
1368  */
1369 static void vmbus_onversion_response(
1370 		struct vmbus_channel_message_header *hdr)
1371 {
1372 	struct vmbus_channel_msginfo *msginfo;
1373 	struct vmbus_channel_message_header *requestheader;
1374 	struct vmbus_channel_version_response *version_response;
1375 	unsigned long flags;
1376 
1377 	version_response = (struct vmbus_channel_version_response *)hdr;
1378 
1379 	trace_vmbus_onversion_response(version_response);
1380 
1381 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1382 
1383 	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1384 				msglistentry) {
1385 		requestheader =
1386 			(struct vmbus_channel_message_header *)msginfo->msg;
1387 
1388 		if (requestheader->msgtype ==
1389 		    CHANNELMSG_INITIATE_CONTACT) {
1390 			memcpy(&msginfo->response.version_response,
1391 			      version_response,
1392 			      sizeof(struct vmbus_channel_version_response));
1393 			complete(&msginfo->waitevent);
1394 		}
1395 	}
1396 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1397 }
1398 
1399 /* Channel message dispatch table */
1400 const struct vmbus_channel_message_table_entry
1401 channel_message_table[CHANNELMSG_COUNT] = {
1402 	{ CHANNELMSG_INVALID,			0, NULL, 0},
1403 	{ CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer,
1404 		sizeof(struct vmbus_channel_offer_channel)},
1405 	{ CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind,
1406 		sizeof(struct vmbus_channel_rescind_offer) },
1407 	{ CHANNELMSG_REQUESTOFFERS,		0, NULL, 0},
1408 	{ CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered, 0},
1409 	{ CHANNELMSG_OPENCHANNEL,		0, NULL, 0},
1410 	{ CHANNELMSG_OPENCHANNEL_RESULT,	1, vmbus_onopen_result,
1411 		sizeof(struct vmbus_channel_open_result)},
1412 	{ CHANNELMSG_CLOSECHANNEL,		0, NULL, 0},
1413 	{ CHANNELMSG_GPADL_HEADER,		0, NULL, 0},
1414 	{ CHANNELMSG_GPADL_BODY,		0, NULL, 0},
1415 	{ CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created,
1416 		sizeof(struct vmbus_channel_gpadl_created)},
1417 	{ CHANNELMSG_GPADL_TEARDOWN,		0, NULL, 0},
1418 	{ CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown,
1419 		sizeof(struct vmbus_channel_gpadl_torndown) },
1420 	{ CHANNELMSG_RELID_RELEASED,		0, NULL, 0},
1421 	{ CHANNELMSG_INITIATE_CONTACT,		0, NULL, 0},
1422 	{ CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response,
1423 		sizeof(struct vmbus_channel_version_response)},
1424 	{ CHANNELMSG_UNLOAD,			0, NULL, 0},
1425 	{ CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response, 0},
1426 	{ CHANNELMSG_18,			0, NULL, 0},
1427 	{ CHANNELMSG_19,			0, NULL, 0},
1428 	{ CHANNELMSG_20,			0, NULL, 0},
1429 	{ CHANNELMSG_TL_CONNECT_REQUEST,	0, NULL, 0},
1430 	{ CHANNELMSG_MODIFYCHANNEL,		0, NULL, 0},
1431 	{ CHANNELMSG_TL_CONNECT_RESULT,		0, NULL, 0},
1432 };
1433 
1434 /*
1435  * vmbus_onmessage - Handler for channel protocol messages.
1436  *
1437  * This is invoked in the vmbus worker thread context.
1438  */
1439 void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
1440 {
1441 	trace_vmbus_on_message(hdr);
1442 
1443 	/*
1444 	 * vmbus_on_msg_dpc() makes sure the hdr->msgtype here cannot go
1445 	 * out of bounds and that the message_handler pointer cannot be NULL.
1446 	 */
1447 	channel_message_table[hdr->msgtype].message_handler(hdr);
1448 }
1449 
1450 /*
1451  * vmbus_request_offers - Send a request to get all our pending offers.
1452  */
1453 int vmbus_request_offers(void)
1454 {
1455 	struct vmbus_channel_message_header *msg;
1456 	struct vmbus_channel_msginfo *msginfo;
1457 	int ret;
1458 
1459 	msginfo = kmalloc(sizeof(*msginfo) +
1460 			  sizeof(struct vmbus_channel_message_header),
1461 			  GFP_KERNEL);
1462 	if (!msginfo)
1463 		return -ENOMEM;
1464 
1465 	msg = (struct vmbus_channel_message_header *)msginfo->msg;
1466 
1467 	msg->msgtype = CHANNELMSG_REQUESTOFFERS;
1468 
1469 	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
1470 			     true);
1471 
1472 	trace_vmbus_request_offers(ret);
1473 
1474 	if (ret != 0) {
1475 		pr_err("Unable to request offers - %d\n", ret);
1476 
1477 		goto cleanup;
1478 	}
1479 
1480 cleanup:
1481 	kfree(msginfo);
1482 
1483 	return ret;
1484 }
1485 
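/*
 * invoke_sc_cb - Run the primary channel's sub-channel creation callback for
 * each of its existing sub-channels.
 */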
1486 static void invoke_sc_cb(struct vmbus_channel *primary_channel)
1487 {
1488 	struct list_head *cur, *tmp;
1489 	struct vmbus_channel *cur_channel;
1490 
1491 	if (primary_channel->sc_creation_callback == NULL)
1492 		return;
1493 
1494 	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
1495 		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
1496 
1497 		primary_channel->sc_creation_callback(cur_channel);
1498 	}
1499 }
1500 
1501 void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
1502 				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
1503 {
1504 	primary_channel->sc_creation_callback = sc_cr_cb;
1505 }
1506 EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
1507 
1508 bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
1509 {
1510 	bool ret;
1511 
1512 	ret = !list_empty(&primary->sc_list);
1513 
1514 	if (ret) {
1515 		/*
1516 		 * Invoke the callback on sub-channel creation.
1517 		 * This will present a uniform interface to the
1518 		 * clients.
1519 		 */
1520 		invoke_sc_cb(primary);
1521 	}
1522 
1523 	return ret;
1524 }
1525 EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
1526 
1527 void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
1528 		void (*chn_rescind_cb)(struct vmbus_channel *))
1529 {
1530 	channel->chn_rescind_callback = chn_rescind_cb;
1531 }
1532 EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);
1533