1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2009, Microsoft Corporation.
4  *
5  * Authors:
6  *   Haiyang Zhang <haiyangz@microsoft.com>
7  *   Hank Janssen  <hjanssen@microsoft.com>
8  */
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/kernel.h>
12 #include <linux/sched.h>
13 #include <linux/wait.h>
14 #include <linux/mm.h>
15 #include <linux/delay.h>
16 #include <linux/io.h>
17 #include <linux/slab.h>
18 #include <linux/netdevice.h>
19 #include <linux/if_ether.h>
20 #include <linux/vmalloc.h>
21 #include <linux/rtnetlink.h>
22 #include <linux/prefetch.h>
23 #include <linux/filter.h>
24 
25 #include <asm/sync_bitops.h>
26 #include <asm/mshyperv.h>
27 
28 #include "hyperv_net.h"
29 #include "netvsc_trace.h"
30 
31 /*
32  * Switch the data path from the synthetic interface to the VF
33  * interface.
34  */
35 int netvsc_switch_datapath(struct net_device *ndev, bool vf)
36 {
37 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
38 	struct hv_device *dev = net_device_ctx->device_ctx;
39 	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
40 	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
41 	int ret, retry = 0;
42 
43 	/* Block sending traffic to VF if it's about to be removed */
44 	if (!vf)
45 		net_device_ctx->data_path_is_vf = vf;
46 
47 	memset(init_pkt, 0, sizeof(struct nvsp_message));
48 	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
49 	if (vf)
50 		init_pkt->msg.v4_msg.active_dp.active_datapath =
51 			NVSP_DATAPATH_VF;
52 	else
53 		init_pkt->msg.v4_msg.active_dp.active_datapath =
54 			NVSP_DATAPATH_SYNTHETIC;
55 
56 again:
57 	trace_nvsp_send(ndev, init_pkt);
58 
59 	ret = vmbus_sendpacket(dev->channel, init_pkt,
60 			       sizeof(struct nvsp_message),
61 			       (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
62 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
63 
64 	/* If we failed to switch to/from the VF, let data_path_is_vf stay
65 	 * false, so we use the synthetic path to send data.
66 	 */
67 	if (ret) {
68 		if (ret != -EAGAIN) {
69 			netdev_err(ndev,
70 				   "Unable to send sw datapath msg, err: %d\n",
71 				   ret);
72 			return ret;
73 		}
74 
75 		if (retry++ < RETRY_MAX) {
76 			usleep_range(RETRY_US_LO, RETRY_US_HI);
77 			goto again;
78 		} else {
79 			netdev_err(
80 				ndev,
81 				"Retry failed to send sw datapath msg, err: %d\n",
82 				ret);
83 			return ret;
84 		}
85 	}
86 
87 	wait_for_completion(&nv_dev->channel_init_wait);
88 	net_device_ctx->data_path_is_vf = vf;
89 
90 	return 0;
91 }
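/* Illustrative sketch only (not part of the driver): a hypothetical caller
 * would flip the data path under RTNL when the VF netdev comes or goes:
 *
 *	ASSERT_RTNL();
 *	ret = netvsc_switch_datapath(ndev, true);	// prefer the VF path
 *	if (ret)
 *		netdev_err(ndev, "switch to VF failed: %d\n", ret);
 *
 * On success, the function has already waited on channel_init_wait for the
 * host to complete the NVSP_MSG4_TYPE_SWITCH_DATA_PATH request (see
 * netvsc_send_completion()).
 */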
92 
93 /* Worker to set up sub-channels during initial setup.
94  * The initial hotplug event occurs in softirq context
95  * and can't wait for channels.
96  */
97 static void netvsc_subchan_work(struct work_struct *w)
98 {
99 	struct netvsc_device *nvdev =
100 		container_of(w, struct netvsc_device, subchan_work);
101 	struct rndis_device *rdev;
102 	int i, ret;
103 
104 	/* Avoid deadlock with device removal already under RTNL */
105 	if (!rtnl_trylock()) {
106 		schedule_work(w);
107 		return;
108 	}
109 
110 	rdev = nvdev->extension;
111 	if (rdev) {
112 		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
113 		if (ret == 0) {
114 			netif_device_attach(rdev->ndev);
115 		} else {
116 			/* fallback to only primary channel */
117 			for (i = 1; i < nvdev->num_chn; i++)
118 				netif_napi_del(&nvdev->chan_table[i].napi);
119 
120 			nvdev->max_chn = 1;
121 			nvdev->num_chn = 1;
122 		}
123 	}
124 
125 	rtnl_unlock();
126 }
127 
128 static struct netvsc_device *alloc_net_device(void)
129 {
130 	struct netvsc_device *net_device;
131 
132 	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
133 	if (!net_device)
134 		return NULL;
135 
136 	init_waitqueue_head(&net_device->wait_drain);
137 	net_device->destroy = false;
138 	net_device->tx_disable = true;
139 
140 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
141 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
142 
143 	init_completion(&net_device->channel_init_wait);
144 	init_waitqueue_head(&net_device->subchan_open);
145 	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
146 
147 	return net_device;
148 }
149 
150 static void free_netvsc_device(struct rcu_head *head)
151 {
152 	struct netvsc_device *nvdev
153 		= container_of(head, struct netvsc_device, rcu);
154 	int i;
155 
156 	kfree(nvdev->extension);
157 	vfree(nvdev->recv_buf);
158 	vfree(nvdev->send_buf);
159 	bitmap_free(nvdev->send_section_map);
160 
161 	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
162 		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
163 		kfree(nvdev->chan_table[i].recv_buf);
164 		vfree(nvdev->chan_table[i].mrc.slots);
165 	}
166 
167 	kfree(nvdev);
168 }
169 
170 static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
171 {
172 	call_rcu(&nvdev->rcu, free_netvsc_device);
173 }
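/* A minimal sketch of the RCU lifetime rule these helpers implement:
 * readers dereference the device pointer inside an RCU read-side critical
 * section, e.g.
 *
 *	rcu_read_lock();
 *	nvdev = rcu_dereference(net_device_ctx->nvdev);
 *	if (nvdev)
 *		netdev_dbg(ndev, "num_chn %u\n", nvdev->num_chn);
 *	rcu_read_unlock();
 *
 * free_netvsc_device_rcu() defers the actual kfree() via call_rcu(), so
 * the structure cannot be freed while such a section is still running.
 */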
174 
175 static void netvsc_revoke_recv_buf(struct hv_device *device,
176 				   struct netvsc_device *net_device,
177 				   struct net_device *ndev)
178 {
179 	struct nvsp_message *revoke_packet;
180 	int ret;
181 
182 	/*
183 	 * If we got a section count, it means we received a
184 	 * SendReceiveBufferComplete msg (i.e. we sent a
185 	 * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
186 	 * to send a revoke msg here.
187 	 */
188 	if (net_device->recv_section_cnt) {
189 		/* Send the revoke receive buffer */
190 		revoke_packet = &net_device->revoke_packet;
191 		memset(revoke_packet, 0, sizeof(struct nvsp_message));
192 
193 		revoke_packet->hdr.msg_type =
194 			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
195 		revoke_packet->msg.v1_msg.
196 		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
197 
198 		trace_nvsp_send(ndev, revoke_packet);
199 
200 		ret = vmbus_sendpacket(device->channel,
201 				       revoke_packet,
202 				       sizeof(struct nvsp_message),
203 				       VMBUS_RQST_ID_NO_RESPONSE,
204 				       VM_PKT_DATA_INBAND, 0);
205 		/* If the failure is because the channel is rescinded,
206 		 * ignore the failure since we cannot send on a rescinded
207 		 * channel. This allows us to clean up properly
208 		 * even when the channel is rescinded.
209 		 */
210 		if (device->channel->rescind)
211 			ret = 0;
212 		/*
213 		 * If we failed here, we might as well return and
214 		 * have a leak rather than continue and risk a bugcheck.
215 		 */
216 		if (ret != 0) {
217 			netdev_err(ndev, "unable to send "
218 				"revoke receive buffer to netvsp\n");
219 			return;
220 		}
221 		net_device->recv_section_cnt = 0;
222 	}
223 }
224 
225 static void netvsc_revoke_send_buf(struct hv_device *device,
226 				   struct netvsc_device *net_device,
227 				   struct net_device *ndev)
228 {
229 	struct nvsp_message *revoke_packet;
230 	int ret;
231 
232 	/* Deal with the send buffer we may have set up.
233 	 * If we got a send section count, it means we received a
234 	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
235 	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg); therefore, we need
236 	 * to send a revoke msg here.
237 	 */
238 	if (net_device->send_section_cnt) {
239 		/* Send the revoke send buffer */
240 		revoke_packet = &net_device->revoke_packet;
241 		memset(revoke_packet, 0, sizeof(struct nvsp_message));
242 
243 		revoke_packet->hdr.msg_type =
244 			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
245 		revoke_packet->msg.v1_msg.revoke_send_buf.id =
246 			NETVSC_SEND_BUFFER_ID;
247 
248 		trace_nvsp_send(ndev, revoke_packet);
249 
250 		ret = vmbus_sendpacket(device->channel,
251 				       revoke_packet,
252 				       sizeof(struct nvsp_message),
253 				       VMBUS_RQST_ID_NO_RESPONSE,
254 				       VM_PKT_DATA_INBAND, 0);
255 
256 		/* If the failure is because the channel is rescinded,
257 		 * ignore the failure since we cannot send on a rescinded
258 		 * channel. This allows us to clean up properly
259 		 * even when the channel is rescinded.
260 		 */
261 		if (device->channel->rescind)
262 			ret = 0;
263 
264 		/* If we failed here, we might as well return and
265 		 * have a leak rather than continue and risk a bugcheck.
266 		 */
267 		if (ret != 0) {
268 			netdev_err(ndev, "unable to send "
269 				   "revoke send buffer to netvsp\n");
270 			return;
271 		}
272 		net_device->send_section_cnt = 0;
273 	}
274 }
275 
276 static void netvsc_teardown_recv_gpadl(struct hv_device *device,
277 				       struct netvsc_device *net_device,
278 				       struct net_device *ndev)
279 {
280 	int ret;
281 
282 	if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
283 		ret = vmbus_teardown_gpadl(device->channel,
284 					   &net_device->recv_buf_gpadl_handle);
285 
286 		/* If we failed here, we might as well return and have a leak
287 		 * rather than continue and risk a bugcheck.
288 		 */
289 		if (ret != 0) {
290 			netdev_err(ndev,
291 				   "unable to teardown receive buffer's gpadl\n");
292 			return;
293 		}
294 	}
295 }
296 
297 static void netvsc_teardown_send_gpadl(struct hv_device *device,
298 				       struct netvsc_device *net_device,
299 				       struct net_device *ndev)
300 {
301 	int ret;
302 
303 	if (net_device->send_buf_gpadl_handle.gpadl_handle) {
304 		ret = vmbus_teardown_gpadl(device->channel,
305 					   &net_device->send_buf_gpadl_handle);
306 
307 		/* If we failed here, we might as well return and have a leak
308 		 * rather than continue and risk a bugcheck.
309 		 */
310 		if (ret != 0) {
311 			netdev_err(ndev,
312 				   "unable to teardown send buffer's gpadl\n");
313 			return;
314 		}
315 	}
316 }
317 
318 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
319 {
320 	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
321 	int node = cpu_to_node(nvchan->channel->target_cpu);
322 	size_t size;
323 
324 	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
325 	nvchan->mrc.slots = vzalloc_node(size, node);
326 	if (!nvchan->mrc.slots)
327 		nvchan->mrc.slots = vzalloc(size);
328 
329 	return nvchan->mrc.slots ? 0 : -ENOMEM;
330 }
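/* The ring allocated above is consumed by send_recv_completions() and
 * enq_receive_complete() below. It is a classic ring that keeps one slot
 * empty so that "first == next" unambiguously means empty (see
 * recv_comp_slot_avail()). For example, with recv_completion_cnt = 8 the
 * ring can hold at most 7 pending completions.
 */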
331 
332 static int netvsc_init_buf(struct hv_device *device,
333 			   struct netvsc_device *net_device,
334 			   const struct netvsc_device_info *device_info)
335 {
336 	struct nvsp_1_message_send_receive_buffer_complete *resp;
337 	struct net_device *ndev = hv_get_drvdata(device);
338 	struct nvsp_message *init_packet;
339 	unsigned int buf_size;
340 	int i, ret = 0;
341 
342 	/* Get receive buffer area. */
343 	buf_size = device_info->recv_sections * device_info->recv_section_size;
344 	buf_size = roundup(buf_size, PAGE_SIZE);
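	/* Worked example with hypothetical numbers: 1000 sections of
	 * 1728 bytes give buf_size = 1728000, which roundup() raises to
	 * the next 4K page multiple, 1728512 (= 422 * 4096).
	 */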
345 
346 	/* Legacy hosts only allow a smaller receive buffer */
347 	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
348 		buf_size = min_t(unsigned int, buf_size,
349 				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
350 
351 	net_device->recv_buf = vzalloc(buf_size);
352 	if (!net_device->recv_buf) {
353 		netdev_err(ndev,
354 			   "unable to allocate receive buffer of size %u\n",
355 			   buf_size);
356 		ret = -ENOMEM;
357 		goto cleanup;
358 	}
359 
360 	net_device->recv_buf_size = buf_size;
361 
362 	/*
363 	 * Establish the gpadl handle for this buffer on this
364 	 * channel.  Note: This call uses the vmbus connection rather
365 	 * than the channel to establish the gpadl handle.
366 	 */
367 	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
368 				    buf_size,
369 				    &net_device->recv_buf_gpadl_handle);
370 	if (ret != 0) {
371 		netdev_err(ndev,
372 			"unable to establish receive buffer's gpadl\n");
373 		goto cleanup;
374 	}
375 
376 	/* Notify the NetVsp of the gpadl handle */
377 	init_packet = &net_device->channel_init_pkt;
378 	memset(init_packet, 0, sizeof(struct nvsp_message));
379 	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
380 	init_packet->msg.v1_msg.send_recv_buf.
381 		gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
382 	init_packet->msg.v1_msg.
383 		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
384 
385 	trace_nvsp_send(ndev, init_packet);
386 
387 	/* Send the gpadl notification request */
388 	ret = vmbus_sendpacket(device->channel, init_packet,
389 			       sizeof(struct nvsp_message),
390 			       (unsigned long)init_packet,
391 			       VM_PKT_DATA_INBAND,
392 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
393 	if (ret != 0) {
394 		netdev_err(ndev,
395 			"unable to send receive buffer's gpadl to netvsp\n");
396 		goto cleanup;
397 	}
398 
399 	wait_for_completion(&net_device->channel_init_wait);
400 
401 	/* Check the response */
402 	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
403 	if (resp->status != NVSP_STAT_SUCCESS) {
404 		netdev_err(ndev,
405 			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
406 			   resp->status);
407 		ret = -EINVAL;
408 		goto cleanup;
409 	}
410 
411 	/* Parse the response */
412 	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
413 		   resp->num_sections, resp->sections[0].sub_alloc_size,
414 		   resp->sections[0].num_sub_allocs);
415 
416 	/* There should only be one section for the entire receive buffer */
417 	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
418 		ret = -EINVAL;
419 		goto cleanup;
420 	}
421 
422 	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
423 	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
424 
425 	/* Ensure buffer will not overflow */
426 	if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
427 	    (u64)net_device->recv_section_cnt > (u64)buf_size) {
428 		netdev_err(ndev, "invalid recv_section_size %u\n",
429 			   net_device->recv_section_size);
430 		ret = -EINVAL;
431 		goto cleanup;
432 	}
433 
434 	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
435 		struct netvsc_channel *nvchan = &net_device->chan_table[i];
436 
437 		nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
438 		if (nvchan->recv_buf == NULL) {
439 			ret = -ENOMEM;
440 			goto cleanup;
441 		}
442 	}
443 
444 	/* Set up the receive completion ring.
445 	 * Add 1 to the recv_section_cnt because at least one entry in a
446 	 * ring buffer has to be empty.
447 	 */
448 	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
449 	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
450 	if (ret)
451 		goto cleanup;
452 
453 	/* Now setup the send buffer. */
454 	buf_size = device_info->send_sections * device_info->send_section_size;
455 	buf_size = round_up(buf_size, PAGE_SIZE);
456 
457 	net_device->send_buf = vzalloc(buf_size);
458 	if (!net_device->send_buf) {
459 		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
460 			   buf_size);
461 		ret = -ENOMEM;
462 		goto cleanup;
463 	}
464 	net_device->send_buf_size = buf_size;
465 
466 	/* Establish the gpadl handle for this buffer on this
467 	 * channel.  Note: This call uses the vmbus connection rather
468 	 * than the channel to establish the gpadl handle.
469 	 */
470 	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
471 				    buf_size,
472 				    &net_device->send_buf_gpadl_handle);
473 	if (ret != 0) {
474 		netdev_err(ndev,
475 			   "unable to establish send buffer's gpadl\n");
476 		goto cleanup;
477 	}
478 
479 	/* Notify the NetVsp of the gpadl handle */
480 	init_packet = &net_device->channel_init_pkt;
481 	memset(init_packet, 0, sizeof(struct nvsp_message));
482 	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
483 	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
484 		net_device->send_buf_gpadl_handle.gpadl_handle;
485 	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
486 
487 	trace_nvsp_send(ndev, init_packet);
488 
489 	/* Send the gpadl notification request */
490 	ret = vmbus_sendpacket(device->channel, init_packet,
491 			       sizeof(struct nvsp_message),
492 			       (unsigned long)init_packet,
493 			       VM_PKT_DATA_INBAND,
494 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
495 	if (ret != 0) {
496 		netdev_err(ndev,
497 			   "unable to send send buffer's gpadl to netvsp\n");
498 		goto cleanup;
499 	}
500 
501 	wait_for_completion(&net_device->channel_init_wait);
502 
503 	/* Check the response */
504 	if (init_packet->msg.v1_msg.
505 	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
506 		netdev_err(ndev, "Unable to complete send buffer "
507 			   "initialization with NetVsp - status %d\n",
508 			   init_packet->msg.v1_msg.
509 			   send_send_buf_complete.status);
510 		ret = -EINVAL;
511 		goto cleanup;
512 	}
513 
514 	/* Parse the response */
515 	net_device->send_section_size = init_packet->msg.
516 				v1_msg.send_send_buf_complete.section_size;
517 	if (net_device->send_section_size < NETVSC_MTU_MIN) {
518 		netdev_err(ndev, "invalid send_section_size %u\n",
519 			   net_device->send_section_size);
520 		ret = -EINVAL;
521 		goto cleanup;
522 	}
523 
524 	/* Section count is simply the size divided by the section size. */
525 	net_device->send_section_cnt = buf_size / net_device->send_section_size;
526 
527 	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
528 		   net_device->send_section_size, net_device->send_section_cnt);
529 
530 	/* Setup state for managing the send buffer. */
531 	net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
532 						     GFP_KERNEL);
533 	if (!net_device->send_section_map) {
534 		ret = -ENOMEM;
535 		goto cleanup;
536 	}
537 
538 	goto exit;
539 
540 cleanup:
541 	netvsc_revoke_recv_buf(device, net_device, ndev);
542 	netvsc_revoke_send_buf(device, net_device, ndev);
543 	netvsc_teardown_recv_gpadl(device, net_device, ndev);
544 	netvsc_teardown_send_gpadl(device, net_device, ndev);
545 
546 exit:
547 	return ret;
548 }
549 
550 /* Negotiate NVSP protocol version */
551 static int negotiate_nvsp_ver(struct hv_device *device,
552 			      struct netvsc_device *net_device,
553 			      struct nvsp_message *init_packet,
554 			      u32 nvsp_ver)
555 {
556 	struct net_device *ndev = hv_get_drvdata(device);
557 	int ret;
558 
559 	memset(init_packet, 0, sizeof(struct nvsp_message));
560 	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
561 	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
562 	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
563 	trace_nvsp_send(ndev, init_packet);
564 
565 	/* Send the init request */
566 	ret = vmbus_sendpacket(device->channel, init_packet,
567 			       sizeof(struct nvsp_message),
568 			       (unsigned long)init_packet,
569 			       VM_PKT_DATA_INBAND,
570 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
571 
572 	if (ret != 0)
573 		return ret;
574 
575 	wait_for_completion(&net_device->channel_init_wait);
576 
577 	if (init_packet->msg.init_msg.init_complete.status !=
578 	    NVSP_STAT_SUCCESS)
579 		return -EINVAL;
580 
581 	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
582 		return 0;
583 
584 	/* NVSPv2 or later: Send NDIS config */
585 	memset(init_packet, 0, sizeof(struct nvsp_message));
586 	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
587 	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
588 	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
589 
590 	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
591 		if (hv_is_isolation_supported())
592 			netdev_info(ndev, "SR-IOV is not advertised by the guest on a host that supports isolation\n");
593 		else
594 			init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
595 
596 		/* Teaming bit is needed to receive link speed updates */
597 		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
598 	}
599 
600 	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
601 		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;
602 
603 	trace_nvsp_send(ndev, init_packet);
604 
605 	ret = vmbus_sendpacket(device->channel, init_packet,
606 				sizeof(struct nvsp_message),
607 				VMBUS_RQST_ID_NO_RESPONSE,
608 				VM_PKT_DATA_INBAND, 0);
609 
610 	return ret;
611 }
612 
613 static int netvsc_connect_vsp(struct hv_device *device,
614 			      struct netvsc_device *net_device,
615 			      const struct netvsc_device_info *device_info)
616 {
617 	struct net_device *ndev = hv_get_drvdata(device);
618 	static const u32 ver_list[] = {
619 		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
620 		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
621 		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
622 	};
623 	struct nvsp_message *init_packet;
624 	int ndis_version, i, ret;
625 
626 	init_packet = &net_device->channel_init_pkt;
627 
628 	/* Negotiate the latest NVSP protocol supported */
629 	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
630 		if (negotiate_nvsp_ver(device, net_device, init_packet,
631 				       ver_list[i])  == 0) {
632 			net_device->nvsp_version = ver_list[i];
633 			break;
634 		}
635 
636 	if (i < 0) {
637 		ret = -EPROTO;
638 		goto cleanup;
639 	}
640 
641 	if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
642 		netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
643 			   net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
644 		ret = -EPROTO;
645 		goto cleanup;
646 	}
647 
648 	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
649 
650 	/* Send the ndis version */
651 	memset(init_packet, 0, sizeof(struct nvsp_message));
652 
653 	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
654 		ndis_version = 0x00060001;
655 	else
656 		ndis_version = 0x0006001e;
657 
658 	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
659 	init_packet->msg.v1_msg.
660 		send_ndis_ver.ndis_major_ver =
661 				(ndis_version & 0xFFFF0000) >> 16;
662 	init_packet->msg.v1_msg.
663 		send_ndis_ver.ndis_minor_ver =
664 				ndis_version & 0xFFFF;
665 
666 	trace_nvsp_send(ndev, init_packet);
667 
668 	/* Send the init request */
669 	ret = vmbus_sendpacket(device->channel, init_packet,
670 				sizeof(struct nvsp_message),
671 				VMBUS_RQST_ID_NO_RESPONSE,
672 				VM_PKT_DATA_INBAND, 0);
673 	if (ret != 0)
674 		goto cleanup;
675 
676 
677 	ret = netvsc_init_buf(device, net_device, device_info);
678 
679 cleanup:
680 	return ret;
681 }
682 
683 /*
684  * netvsc_device_remove - Callback when the root bus device is removed
685  */
686 void netvsc_device_remove(struct hv_device *device)
687 {
688 	struct net_device *ndev = hv_get_drvdata(device);
689 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
690 	struct netvsc_device *net_device
691 		= rtnl_dereference(net_device_ctx->nvdev);
692 	int i;
693 
694 	/*
695 	 * Revoke the receive buffer. If the host is pre-Win2016, tear down
696 	 * the receive buffer GPADL here. Do the same for the send buffer.
697 	 */
698 	netvsc_revoke_recv_buf(device, net_device, ndev);
699 	if (vmbus_proto_version < VERSION_WIN10)
700 		netvsc_teardown_recv_gpadl(device, net_device, ndev);
701 
702 	netvsc_revoke_send_buf(device, net_device, ndev);
703 	if (vmbus_proto_version < VERSION_WIN10)
704 		netvsc_teardown_send_gpadl(device, net_device, ndev);
705 
706 	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
707 
708 	/* Disable NAPI and disassociate its context from the device. */
709 	for (i = 0; i < net_device->num_chn; i++) {
710 		/* See also vmbus_reset_channel_cb(). */
711 		napi_disable(&net_device->chan_table[i].napi);
712 		netif_napi_del(&net_device->chan_table[i].napi);
713 	}
714 
715 	/*
716 	 * At this point, no one should be accessing net_device
717 	 * except in here
718 	 */
719 	netdev_dbg(ndev, "net device safe to remove\n");
720 
721 	/* Now, we can close the channel safely */
722 	vmbus_close(device->channel);
723 
724 	/*
725 	 * If the host is Win2016 or higher, we do the GPADL teardown
726 	 * here, after VMBus is closed.
727 	 */
728 	if (vmbus_proto_version >= VERSION_WIN10) {
729 		netvsc_teardown_recv_gpadl(device, net_device, ndev);
730 		netvsc_teardown_send_gpadl(device, net_device, ndev);
731 	}
732 
733 	/* Release all resources */
734 	free_netvsc_device_rcu(net_device);
735 }
736 
737 #define RING_AVAIL_PERCENT_HIWATER 20
738 #define RING_AVAIL_PERCENT_LOWATER 10
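/* Illustration of the hysteresis these watermarks create (percentages of
 * free space in the outbound ring, using the defines above):
 * netvsc_send_pkt() stops a queue when free space falls below 10%
 * (LOWATER); netvsc_send_tx_complete() wakes it only once free space is
 * back above 20% (HIWATER) or the channel has drained (queue_sends < 1).
 */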
739 
740 static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
741 					 u32 index)
742 {
743 	sync_change_bit(index, net_device->send_section_map);
744 }
745 
746 static void netvsc_send_tx_complete(struct net_device *ndev,
747 				    struct netvsc_device *net_device,
748 				    struct vmbus_channel *channel,
749 				    const struct vmpacket_descriptor *desc,
750 				    int budget)
751 {
752 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
753 	struct sk_buff *skb;
754 	u16 q_idx = 0;
755 	int queue_sends;
756 	u64 cmd_rqst;
757 
758 	cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
759 	if (cmd_rqst == VMBUS_RQST_ERROR) {
760 		netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
761 		return;
762 	}
763 
764 	skb = (struct sk_buff *)(unsigned long)cmd_rqst;
765 
766 	/* Notify the layer above us */
767 	if (likely(skb)) {
768 		struct hv_netvsc_packet *packet
769 			= (struct hv_netvsc_packet *)skb->cb;
770 		u32 send_index = packet->send_buf_index;
771 		struct netvsc_stats_tx *tx_stats;
772 
773 		if (send_index != NETVSC_INVALID_INDEX)
774 			netvsc_free_send_slot(net_device, send_index);
775 		q_idx = packet->q_idx;
776 
777 		tx_stats = &net_device->chan_table[q_idx].tx_stats;
778 
779 		u64_stats_update_begin(&tx_stats->syncp);
780 		tx_stats->packets += packet->total_packets;
781 		tx_stats->bytes += packet->total_bytes;
782 		u64_stats_update_end(&tx_stats->syncp);
783 
784 		netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
785 		napi_consume_skb(skb, budget);
786 	}
787 
788 	queue_sends =
789 		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
790 
791 	if (unlikely(net_device->destroy)) {
792 		if (queue_sends == 0)
793 			wake_up(&net_device->wait_drain);
794 	} else {
795 		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
796 
797 		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
798 		    (hv_get_avail_to_write_percent(&channel->outbound) >
799 		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
800 			netif_tx_wake_queue(txq);
801 			ndev_ctx->eth_stats.wake_queue++;
802 		}
803 	}
804 }
805 
806 static void netvsc_send_completion(struct net_device *ndev,
807 				   struct netvsc_device *net_device,
808 				   struct vmbus_channel *incoming_channel,
809 				   const struct vmpacket_descriptor *desc,
810 				   int budget)
811 {
812 	const struct nvsp_message *nvsp_packet;
813 	u32 msglen = hv_pkt_datalen(desc);
814 	struct nvsp_message *pkt_rqst;
815 	u64 cmd_rqst;
816 	u32 status;
817 
818 	/* First check if this is a VMBUS completion without data payload */
819 	if (!msglen) {
820 		cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
821 								   desc->trans_id);
822 		if (cmd_rqst == VMBUS_RQST_ERROR) {
823 			netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
824 			return;
825 		}
826 
827 		pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
828 		switch (pkt_rqst->hdr.msg_type) {
829 		case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
830 			complete(&net_device->channel_init_wait);
831 			break;
832 
833 		default:
834 			netdev_err(ndev, "Unexpected VMBUS completion!!\n");
835 		}
836 		return;
837 	}
838 
839 	/* Ensure packet is big enough to read header fields */
840 	if (msglen < sizeof(struct nvsp_message_header)) {
841 		netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
842 		return;
843 	}
844 
845 	nvsp_packet = hv_pkt_data(desc);
846 	switch (nvsp_packet->hdr.msg_type) {
847 	case NVSP_MSG_TYPE_INIT_COMPLETE:
848 		if (msglen < sizeof(struct nvsp_message_header) +
849 				sizeof(struct nvsp_message_init_complete)) {
850 			netdev_err(ndev, "nvsp_msg length too small: %u\n",
851 				   msglen);
852 			return;
853 		}
854 		fallthrough;
855 
856 	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
857 		if (msglen < sizeof(struct nvsp_message_header) +
858 				sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
859 			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
860 				   msglen);
861 			return;
862 		}
863 		fallthrough;
864 
865 	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
866 		if (msglen < sizeof(struct nvsp_message_header) +
867 				sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
868 			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
869 				   msglen);
870 			return;
871 		}
872 		fallthrough;
873 
874 	case NVSP_MSG5_TYPE_SUBCHANNEL:
875 		if (msglen < sizeof(struct nvsp_message_header) +
876 				sizeof(struct nvsp_5_subchannel_complete)) {
877 			netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
878 				   msglen);
879 			return;
880 		}
881 		/* Copy the response back */
882 		memcpy(&net_device->channel_init_pkt, nvsp_packet,
883 		       sizeof(struct nvsp_message));
884 		complete(&net_device->channel_init_wait);
885 		break;
886 
887 	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
888 		if (msglen < sizeof(struct nvsp_message_header) +
889 		    sizeof(struct nvsp_1_message_send_rndis_packet_complete)) {
890 			if (net_ratelimit())
891 				netdev_err(ndev, "nvsp_rndis_pkt_complete length too small: %u\n",
892 					   msglen);
893 			return;
894 		}
895 
896 		/* If status indicates an error, output a message so we know
897 		 * there's a problem. But process the completion anyway so the
898 		 * resources are released.
899 		 */
900 		status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
901 		if (status != NVSP_STAT_SUCCESS && net_ratelimit())
902 			netdev_err(ndev, "nvsp_rndis_pkt_complete error status: %x\n",
903 				   status);
904 
905 		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
906 					desc, budget);
907 		break;
908 
909 	default:
910 		netdev_err(ndev,
911 			   "Unknown send completion type %d received!!\n",
912 			   nvsp_packet->hdr.msg_type);
913 	}
914 }
915 
916 static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
917 {
918 	unsigned long *map_addr = net_device->send_section_map;
919 	unsigned int i;
920 
921 	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
922 		if (sync_test_and_set_bit(i, map_addr) == 0)
923 			return i;
924 	}
925 
926 	return NETVSC_INVALID_INDEX;
927 }
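/* Note on the claim protocol above: the send-section bitmap is a simple
 * lock-free allocator. sync_test_and_set_bit() makes each claim atomic,
 * so two CPUs racing for the same clear bit see 0 and 1 respectively and
 * only one wins; the loser continues scanning the remaining clear bits.
 */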
928 
929 static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
930 				    unsigned int section_index,
931 				    u32 pend_size,
932 				    struct hv_netvsc_packet *packet,
933 				    struct rndis_message *rndis_msg,
934 				    struct hv_page_buffer *pb,
935 				    bool xmit_more)
936 {
937 	char *start = net_device->send_buf;
938 	char *dest = start + (section_index * net_device->send_section_size)
939 		     + pend_size;
940 	int i;
941 	u32 padding = 0;
942 	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
943 		packet->page_buf_cnt;
944 	u32 remain;
945 
946 	/* Add padding */
947 	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
948 	if (xmit_more && remain) {
949 		padding = net_device->pkt_align - remain;
950 		rndis_msg->msg_len += padding;
951 		packet->total_data_buflen += padding;
952 	}
953 
954 	for (i = 0; i < page_count; i++) {
955 		char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
956 		u32 offset = pb[i].offset;
957 		u32 len = pb[i].len;
958 
959 		memcpy(dest, (src + offset), len);
960 		dest += len;
961 	}
962 
963 	if (padding)
964 		memset(dest, 0, padding);
965 }
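/* Worked example of the padding logic above, with hypothetical numbers:
 * pkt_align = 8 and total_data_buflen = 1478 give remain = 6, so
 * padding = 2 and both msg_len and total_data_buflen grow to 1480, a
 * multiple of 8; the padding bytes themselves are zeroed after the copy
 * loop. Padding is only added when xmit_more says another packet will be
 * batched behind this one in the same send section.
 */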
966 
967 void netvsc_dma_unmap(struct hv_device *hv_dev,
968 		      struct hv_netvsc_packet *packet)
969 {
970 	int i;
971 
972 	if (!hv_is_isolation_supported())
973 		return;
974 
975 	if (!packet->dma_range)
976 		return;
977 
978 	for (i = 0; i < packet->page_buf_cnt; i++)
979 		dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
980 				 packet->dma_range[i].mapping_size,
981 				 DMA_TO_DEVICE);
982 
983 	kfree(packet->dma_range);
984 }
985 
986 /* netvsc_dma_map - Map the data pages of a packet sent by
987  * vmbus_sendpacket_pagebuffer() through the swiotlb bounce buffer
988  * in an isolation VM.
989  *
990  * In an isolation VM, the netvsc send buffer has been marked visible
991  * to the host, so data copied into the send buffer doesn't need a
992  * bounce buffer. The data pages handled by vmbus_sendpacket_pagebuffer()
993  * may not be copied to the send buffer, so those pages need to be
994  * mapped through the swiotlb bounce buffer; netvsc_dma_map() does
995  * that. The pfns in the struct hv_page_buffer need to be converted
996  * to the bounce buffer's pfns. The loop here is necessary because the
997  * entries in the page buffer array are not necessarily full
998  * pages of data. Each entry in the array has a separate offset and
999  * len that may be non-zero, even for entries in the middle of the
1000  * array. And the entries are not physically contiguous. So each
1001  * entry must be individually mapped rather than as a contiguous unit,
1002  * which is why dma_map_sg() is not used here.
1003  */
1004 static int netvsc_dma_map(struct hv_device *hv_dev,
1005 			  struct hv_netvsc_packet *packet,
1006 			  struct hv_page_buffer *pb)
1007 {
1008 	u32 page_count = packet->page_buf_cnt;
1009 	dma_addr_t dma;
1010 	int i;
1011 
1012 	if (!hv_is_isolation_supported())
1013 		return 0;
1014 
1015 	packet->dma_range = kcalloc(page_count,
1016 				    sizeof(*packet->dma_range),
1017 				    GFP_ATOMIC);
1018 	if (!packet->dma_range)
1019 		return -ENOMEM;
1020 
1021 	for (i = 0; i < page_count; i++) {
1022 		char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
1023 					 + pb[i].offset);
1024 		u32 len = pb[i].len;
1025 
1026 		dma = dma_map_single(&hv_dev->device, src, len,
1027 				     DMA_TO_DEVICE);
1028 		if (dma_mapping_error(&hv_dev->device, dma)) {
1029 			kfree(packet->dma_range);
1030 			return -ENOMEM;
1031 		}
1032 
1033 		/* pb[].offset and pb[].len are not changed during dma mapping
1034 		 * and so are not reassigned.
1035 		 */
1036 		packet->dma_range[i].dma = dma;
1037 		packet->dma_range[i].mapping_size = len;
1038 		pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
1039 	}
1040 
1041 	return 0;
1042 }
1043 
1044 static inline int netvsc_send_pkt(
1045 	struct hv_device *device,
1046 	struct hv_netvsc_packet *packet,
1047 	struct netvsc_device *net_device,
1048 	struct hv_page_buffer *pb,
1049 	struct sk_buff *skb)
1050 {
1051 	struct nvsp_message nvmsg;
1052 	struct nvsp_1_message_send_rndis_packet *rpkt =
1053 		&nvmsg.msg.v1_msg.send_rndis_pkt;
1054 	struct netvsc_channel * const nvchan =
1055 		&net_device->chan_table[packet->q_idx];
1056 	struct vmbus_channel *out_channel = nvchan->channel;
1057 	struct net_device *ndev = hv_get_drvdata(device);
1058 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1059 	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
1060 	u64 req_id;
1061 	int ret;
1062 	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
1063 
1064 	memset(&nvmsg, 0, sizeof(struct nvsp_message));
1065 	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
1066 	if (skb)
1067 		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
1068 	else
1069 		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */
1070 
1071 	rpkt->send_buf_section_index = packet->send_buf_index;
1072 	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
1073 		rpkt->send_buf_section_size = 0;
1074 	else
1075 		rpkt->send_buf_section_size = packet->total_data_buflen;
1076 
1077 	req_id = (ulong)skb;
1078 
1079 	if (out_channel->rescind)
1080 		return -ENODEV;
1081 
1082 	trace_nvsp_send_pkt(ndev, out_channel, rpkt);
1083 
1084 	packet->dma_range = NULL;
1085 	if (packet->page_buf_cnt) {
1086 		if (packet->cp_partial)
1087 			pb += packet->rmsg_pgcnt;
1088 
1089 		ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
1090 		if (ret) {
1091 			ret = -EAGAIN;
1092 			goto exit;
1093 		}
1094 
1095 		ret = vmbus_sendpacket_pagebuffer(out_channel,
1096 						  pb, packet->page_buf_cnt,
1097 						  &nvmsg, sizeof(nvmsg),
1098 						  req_id);
1099 
1100 		if (ret)
1101 			netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
1102 	} else {
1103 		ret = vmbus_sendpacket(out_channel,
1104 				       &nvmsg, sizeof(nvmsg),
1105 				       req_id, VM_PKT_DATA_INBAND,
1106 				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1107 	}
1108 
1109 exit:
1110 	if (ret == 0) {
1111 		atomic_inc_return(&nvchan->queue_sends);
1112 
1113 		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
1114 			netif_tx_stop_queue(txq);
1115 			ndev_ctx->eth_stats.stop_queue++;
1116 		}
1117 	} else if (ret == -EAGAIN) {
1118 		netif_tx_stop_queue(txq);
1119 		ndev_ctx->eth_stats.stop_queue++;
1120 	} else {
1121 		netdev_err(ndev,
1122 			   "Unable to send packet pages %u len %u, ret %d\n",
1123 			   packet->page_buf_cnt, packet->total_data_buflen,
1124 			   ret);
1125 	}
1126 
1127 	if (netif_tx_queue_stopped(txq) &&
1128 	    atomic_read(&nvchan->queue_sends) < 1 &&
1129 	    !net_device->tx_disable) {
1130 		netif_tx_wake_queue(txq);
1131 		ndev_ctx->eth_stats.wake_queue++;
1132 		if (ret == -EAGAIN)
1133 			ret = -ENOSPC;
1134 	}
1135 
1136 	return ret;
1137 }
1138 
1139 /* Move packet out of multi send data (msd), and clear msd */
1140 static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
1141 				struct sk_buff **msd_skb,
1142 				struct multi_send_data *msdp)
1143 {
1144 	*msd_skb = msdp->skb;
1145 	*msd_send = msdp->pkt;
1146 	msdp->skb = NULL;
1147 	msdp->pkt = NULL;
1148 	msdp->count = 0;
1149 }
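/* After move_pkt_msd(), the caller owns whatever was batched (msd_send /
 * msd_skb) and the per-channel msd slot is empty. netvsc_send() below
 * then flushes the moved packet, roughly:
 *
 *	move_pkt_msd(&msd_send, &msd_skb, msdp);
 *	if (msd_send)
 *		netvsc_send_pkt(device, msd_send, net_device, NULL, msd_skb);
 *	// on failure the send slot is freed and msd_skb is dropped
 */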
1150 
1151 /* RCU already held by caller */
1152 /* The batching/bouncing logic below attempts to optimize
1153  * performance.
1154  *
1155  * For small, non-LSO packets we copy the packet to a send buffer
1156  * which is pre-registered with the Hyper-V side. This enables the
1157  * hypervisor to avoid remapping the aperture to access the packet
1158  * descriptor and data.
1159  *
1160  * If we already started using a buffer and the netdev is transmitting
1161  * a burst of packets, keep on copying into the buffer until it is
1162  * full or we are done collecting a burst. If there is an existing
1163  * buffer with space for the RNDIS descriptor but not the packet, copy
1164  * the RNDIS descriptor to the buffer, keeping the packet in place.
1165  *
1166  * If we do batching and send more than one packet using a single
1167  * NetVSC message, free the SKBs of the packets copied, except for the
1168  * last packet. This is done to streamline the handling of the case
1169  * where the last packet only had the RNDIS descriptor copied to the
1170  * send buffer, with the data pointers included in the NetVSC message.
1171  */
1172 int netvsc_send(struct net_device *ndev,
1173 		struct hv_netvsc_packet *packet,
1174 		struct rndis_message *rndis_msg,
1175 		struct hv_page_buffer *pb,
1176 		struct sk_buff *skb,
1177 		bool xdp_tx)
1178 {
1179 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1180 	struct netvsc_device *net_device
1181 		= rcu_dereference_bh(ndev_ctx->nvdev);
1182 	struct hv_device *device = ndev_ctx->device_ctx;
1183 	int ret = 0;
1184 	struct netvsc_channel *nvchan;
1185 	u32 pktlen = packet->total_data_buflen, msd_len = 0;
1186 	unsigned int section_index = NETVSC_INVALID_INDEX;
1187 	struct multi_send_data *msdp;
1188 	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
1189 	struct sk_buff *msd_skb = NULL;
1190 	bool try_batch, xmit_more;
1191 
1192 	/* If device is rescinded, return error and packet will get dropped. */
1193 	if (unlikely(!net_device || net_device->destroy))
1194 		return -ENODEV;
1195 
1196 	nvchan = &net_device->chan_table[packet->q_idx];
1197 	packet->send_buf_index = NETVSC_INVALID_INDEX;
1198 	packet->cp_partial = false;
1199 
1200 	/* Send a control message or XDP packet directly without accessing
1201 	 * msd (Multi-Send Data) field which may be changed during data packet
1202 	 * processing.
1203 	 */
1204 	if (!skb || xdp_tx)
1205 		return netvsc_send_pkt(device, packet, net_device, pb, skb);
1206 
1207 	/* batch packets in send buffer if possible */
1208 	msdp = &nvchan->msd;
1209 	if (msdp->pkt)
1210 		msd_len = msdp->pkt->total_data_buflen;
1211 
1212 	try_batch =  msd_len > 0 && msdp->count < net_device->max_pkt;
1213 	if (try_batch && msd_len + pktlen + net_device->pkt_align <
1214 	    net_device->send_section_size) {
1215 		section_index = msdp->pkt->send_buf_index;
1216 
1217 	} else if (try_batch && msd_len + packet->rmsg_size <
1218 		   net_device->send_section_size) {
1219 		section_index = msdp->pkt->send_buf_index;
1220 		packet->cp_partial = true;
1221 
1222 	} else if (pktlen + net_device->pkt_align <
1223 		   net_device->send_section_size) {
1224 		section_index = netvsc_get_next_send_section(net_device);
1225 		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
1226 			++ndev_ctx->eth_stats.tx_send_full;
1227 		} else {
1228 			move_pkt_msd(&msd_send, &msd_skb, msdp);
1229 			msd_len = 0;
1230 		}
1231 	}
1232 
1233 	/* Keep aggregating only if the stack says more data is coming,
1234 	 * we are not doing a mixed-mode send, and we are not flow blocked.
1235 	 */
1236 	xmit_more = netdev_xmit_more() &&
1237 		!packet->cp_partial &&
1238 		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
1239 
1240 	if (section_index != NETVSC_INVALID_INDEX) {
1241 		netvsc_copy_to_send_buf(net_device,
1242 					section_index, msd_len,
1243 					packet, rndis_msg, pb, xmit_more);
1244 
1245 		packet->send_buf_index = section_index;
1246 
1247 		if (packet->cp_partial) {
1248 			packet->page_buf_cnt -= packet->rmsg_pgcnt;
1249 			packet->total_data_buflen = msd_len + packet->rmsg_size;
1250 		} else {
1251 			packet->page_buf_cnt = 0;
1252 			packet->total_data_buflen += msd_len;
1253 		}
1254 
1255 		if (msdp->pkt) {
1256 			packet->total_packets += msdp->pkt->total_packets;
1257 			packet->total_bytes += msdp->pkt->total_bytes;
1258 		}
1259 
1260 		if (msdp->skb)
1261 			dev_consume_skb_any(msdp->skb);
1262 
1263 		if (xmit_more) {
1264 			msdp->skb = skb;
1265 			msdp->pkt = packet;
1266 			msdp->count++;
1267 		} else {
1268 			cur_send = packet;
1269 			msdp->skb = NULL;
1270 			msdp->pkt = NULL;
1271 			msdp->count = 0;
1272 		}
1273 	} else {
1274 		move_pkt_msd(&msd_send, &msd_skb, msdp);
1275 		cur_send = packet;
1276 	}
1277 
1278 	if (msd_send) {
1279 		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
1280 					    NULL, msd_skb);
1281 
1282 		if (m_ret != 0) {
1283 			netvsc_free_send_slot(net_device,
1284 					      msd_send->send_buf_index);
1285 			dev_kfree_skb_any(msd_skb);
1286 		}
1287 	}
1288 
1289 	if (cur_send)
1290 		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
1291 
1292 	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
1293 		netvsc_free_send_slot(net_device, section_index);
1294 
1295 	return ret;
1296 }
1297 
1298 /* Send pending recv completions */
1299 static int send_recv_completions(struct net_device *ndev,
1300 				 struct netvsc_device *nvdev,
1301 				 struct netvsc_channel *nvchan)
1302 {
1303 	struct multi_recv_comp *mrc = &nvchan->mrc;
1304 	struct recv_comp_msg {
1305 		struct nvsp_message_header hdr;
1306 		u32 status;
1307 	}  __packed;
1308 	struct recv_comp_msg msg = {
1309 		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
1310 	};
1311 	int ret;
1312 
1313 	while (mrc->first != mrc->next) {
1314 		const struct recv_comp_data *rcd
1315 			= mrc->slots + mrc->first;
1316 
1317 		msg.status = rcd->status;
1318 		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
1319 				       rcd->tid, VM_PKT_COMP, 0);
1320 		if (unlikely(ret)) {
1321 			struct net_device_context *ndev_ctx = netdev_priv(ndev);
1322 
1323 			++ndev_ctx->eth_stats.rx_comp_busy;
1324 			return ret;
1325 		}
1326 
1327 		if (++mrc->first == nvdev->recv_completion_cnt)
1328 			mrc->first = 0;
1329 	}
1330 
1331 	/* receive completion ring has been emptied */
1332 	if (unlikely(nvdev->destroy))
1333 		wake_up(&nvdev->wait_drain);
1334 
1335 	return 0;
1336 }
1337 
1338 /* Count how many receive completions are outstanding */
1339 static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
1340 				 const struct multi_recv_comp *mrc,
1341 				 u32 *filled, u32 *avail)
1342 {
1343 	u32 count = nvdev->recv_completion_cnt;
1344 
1345 	if (mrc->next >= mrc->first)
1346 		*filled = mrc->next - mrc->first;
1347 	else
1348 		*filled = (count - mrc->first) + mrc->next;
1349 
1350 	*avail = count - *filled - 1;
1351 }
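/* Worked example of the accounting above (hypothetical state): with
 * recv_completion_cnt = 8, first = 6 and next = 2 the ring has wrapped,
 * so filled = (8 - 6) + 2 = 4 and avail = 8 - 4 - 1 = 3; one slot is
 * always reserved so that first == next can only mean "empty".
 */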
1352 
1353 /* Add receive complete to ring to send to host. */
1354 static void enq_receive_complete(struct net_device *ndev,
1355 				 struct netvsc_device *nvdev, u16 q_idx,
1356 				 u64 tid, u32 status)
1357 {
1358 	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
1359 	struct multi_recv_comp *mrc = &nvchan->mrc;
1360 	struct recv_comp_data *rcd;
1361 	u32 filled, avail;
1362 
1363 	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1364 
1365 	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
1366 		send_recv_completions(ndev, nvdev, nvchan);
1367 		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1368 	}
1369 
1370 	if (unlikely(!avail)) {
1371 		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
1372 			   q_idx, tid);
1373 		return;
1374 	}
1375 
1376 	rcd = mrc->slots + mrc->next;
1377 	rcd->tid = tid;
1378 	rcd->status = status;
1379 
1380 	if (++mrc->next == nvdev->recv_completion_cnt)
1381 		mrc->next = 0;
1382 }
1383 
1384 static int netvsc_receive(struct net_device *ndev,
1385 			  struct netvsc_device *net_device,
1386 			  struct netvsc_channel *nvchan,
1387 			  const struct vmpacket_descriptor *desc)
1388 {
1389 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
1390 	struct vmbus_channel *channel = nvchan->channel;
1391 	const struct vmtransfer_page_packet_header *vmxferpage_packet
1392 		= container_of(desc, const struct vmtransfer_page_packet_header, d);
1393 	const struct nvsp_message *nvsp = hv_pkt_data(desc);
1394 	u32 msglen = hv_pkt_datalen(desc);
1395 	u16 q_idx = channel->offermsg.offer.sub_channel_index;
1396 	char *recv_buf = net_device->recv_buf;
1397 	u32 status = NVSP_STAT_SUCCESS;
1398 	int i;
1399 	int count = 0;
1400 
1401 	/* Ensure packet is big enough to read header fields */
1402 	if (msglen < sizeof(struct nvsp_message_header)) {
1403 		netif_err(net_device_ctx, rx_err, ndev,
1404 			  "invalid nvsp header, length too small: %u\n",
1405 			  msglen);
1406 		return 0;
1407 	}
1408 
1409 	/* Make sure this is a valid nvsp packet */
1410 	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1411 		netif_err(net_device_ctx, rx_err, ndev,
1412 			  "Unknown nvsp packet type received %u\n",
1413 			  nvsp->hdr.msg_type);
1414 		return 0;
1415 	}
1416 
1417 	/* Validate xfer page pkt header */
1418 	if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
1419 		netif_err(net_device_ctx, rx_err, ndev,
1420 			  "Invalid xfer page pkt, offset too small: %u\n",
1421 			  desc->offset8 << 3);
1422 		return 0;
1423 	}
1424 
1425 	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1426 		netif_err(net_device_ctx, rx_err, ndev,
1427 			  "Invalid xfer page set id - expecting %x got %x\n",
1428 			  NETVSC_RECEIVE_BUFFER_ID,
1429 			  vmxferpage_packet->xfer_pageset_id);
1430 		return 0;
1431 	}
1432 
1433 	count = vmxferpage_packet->range_cnt;
1434 
1435 	/* Check count for a valid value */
1436 	if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
1437 		netif_err(net_device_ctx, rx_err, ndev,
1438 			  "Range count is not valid: %d\n",
1439 			  count);
1440 		return 0;
1441 	}
1442 
1443 	/* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
1444 	for (i = 0; i < count; i++) {
1445 		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
1446 		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
1447 		void *data;
1448 		int ret;
1449 
1450 		if (unlikely(offset > net_device->recv_buf_size ||
1451 			     buflen > net_device->recv_buf_size - offset)) {
1452 			nvchan->rsc.cnt = 0;
1453 			status = NVSP_STAT_FAIL;
1454 			netif_err(net_device_ctx, rx_err, ndev,
1455 				  "Packet offset:%u + len:%u too big\n",
1456 				  offset, buflen);
1457 
1458 			continue;
1459 		}
1460 
1461 		/* We're going to copy (sections of) the packet into nvchan->recv_buf;
1462 		 * make sure that nvchan->recv_buf is large enough to hold the packet.
1463 		 */
1464 		if (unlikely(buflen > net_device->recv_section_size)) {
1465 			nvchan->rsc.cnt = 0;
1466 			status = NVSP_STAT_FAIL;
1467 			netif_err(net_device_ctx, rx_err, ndev,
1468 				  "Packet too big: buflen=%u recv_section_size=%u\n",
1469 				  buflen, net_device->recv_section_size);
1470 
1471 			continue;
1472 		}
1473 
1474 		data = recv_buf + offset;
1475 
1476 		nvchan->rsc.is_last = (i == count - 1);
1477 
1478 		trace_rndis_recv(ndev, q_idx, data);
1479 
1480 		/* Pass it to the upper layer */
1481 		ret = rndis_filter_receive(ndev, net_device,
1482 					   nvchan, data, buflen);
1483 
1484 		if (unlikely(ret != NVSP_STAT_SUCCESS)) {
1485 			/* Drop incomplete packet */
1486 			nvchan->rsc.cnt = 0;
1487 			status = NVSP_STAT_FAIL;
1488 		}
1489 	}
1490 
1491 	enq_receive_complete(ndev, net_device, q_idx,
1492 			     vmxferpage_packet->d.trans_id, status);
1493 
1494 	return count;
1495 }
1496 
1497 static void netvsc_send_table(struct net_device *ndev,
1498 			      struct netvsc_device *nvscdev,
1499 			      const struct nvsp_message *nvmsg,
1500 			      u32 msglen)
1501 {
1502 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
1503 	u32 count, offset, *tab;
1504 	int i;
1505 
1506 	/* Ensure packet is big enough to read send_table fields */
1507 	if (msglen < sizeof(struct nvsp_message_header) +
1508 		     sizeof(struct nvsp_5_send_indirect_table)) {
1509 		netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
1510 		return;
1511 	}
1512 
1513 	count = nvmsg->msg.v5_msg.send_table.count;
1514 	offset = nvmsg->msg.v5_msg.send_table.offset;
1515 
1516 	if (count != VRSS_SEND_TAB_SIZE) {
1517 		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1518 		return;
1519 	}
1520 
1521 	/* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
1522 	 * wrong due to a host bug. So fix the offset here.
1523 	 */
1524 	if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
1525 	    msglen >= sizeof(struct nvsp_message_header) +
1526 	    sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
1527 		offset = sizeof(struct nvsp_message_header) +
1528 			 sizeof(union nvsp_6_message_uber);
1529 
1530 	/* Boundary check for all versions */
1531 	if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
1532 		netdev_err(ndev, "Received send-table offset too big:%u\n",
1533 			   offset);
1534 		return;
1535 	}
1536 
1537 	tab = (void *)nvmsg + offset;
1538 
1539 	for (i = 0; i < count; i++)
1540 		net_device_ctx->tx_table[i] = tab[i];
1541 }
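/* The tx_table filled in above is the host-supplied send indirection
 * table. The transmit path hashes each flow and indexes this table
 * (modulo VRSS_SEND_TAB_SIZE) to choose the channel/queue for the skb;
 * see the queue selection logic in netvsc_drv.c.
 */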
1542 
1543 static void netvsc_send_vf(struct net_device *ndev,
1544 			   const struct nvsp_message *nvmsg,
1545 			   u32 msglen)
1546 {
1547 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
1548 
1549 	/* Ensure packet is big enough to read its fields */
1550 	if (msglen < sizeof(struct nvsp_message_header) +
1551 		     sizeof(struct nvsp_4_send_vf_association)) {
1552 		netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
1553 		return;
1554 	}
1555 
1556 	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1557 	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1558 
1559 	if (net_device_ctx->vf_alloc)
1560 		complete(&net_device_ctx->vf_add);
1561 
1562 	netdev_info(ndev, "VF slot %u %s\n",
1563 		    net_device_ctx->vf_serial,
1564 		    net_device_ctx->vf_alloc ? "added" : "removed");
1565 }
1566 
1567 static void netvsc_receive_inband(struct net_device *ndev,
1568 				  struct netvsc_device *nvscdev,
1569 				  const struct vmpacket_descriptor *desc)
1570 {
1571 	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1572 	u32 msglen = hv_pkt_datalen(desc);
1573 
1574 	/* Ensure packet is big enough to read header fields */
1575 	if (msglen < sizeof(struct nvsp_message_header)) {
1576 		netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
1577 		return;
1578 	}
1579 
1580 	switch (nvmsg->hdr.msg_type) {
1581 	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1582 		netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
1583 		break;
1584 
1585 	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
1586 		if (hv_is_isolation_supported())
1587 			netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
1588 		else
1589 			netvsc_send_vf(ndev, nvmsg, msglen);
1590 		break;
1591 	}
1592 }
1593 
1594 static int netvsc_process_raw_pkt(struct hv_device *device,
1595 				  struct netvsc_channel *nvchan,
1596 				  struct netvsc_device *net_device,
1597 				  struct net_device *ndev,
1598 				  const struct vmpacket_descriptor *desc,
1599 				  int budget)
1600 {
1601 	struct vmbus_channel *channel = nvchan->channel;
1602 	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1603 
1604 	trace_nvsp_recv(ndev, channel, nvmsg);
1605 
1606 	switch (desc->type) {
1607 	case VM_PKT_COMP:
1608 		netvsc_send_completion(ndev, net_device, channel, desc, budget);
1609 		break;
1610 
1611 	case VM_PKT_DATA_USING_XFER_PAGES:
1612 		return netvsc_receive(ndev, net_device, nvchan, desc);
1613 
1614 	case VM_PKT_DATA_INBAND:
1615 		netvsc_receive_inband(ndev, net_device, desc);
1616 		break;
1617 
1618 	default:
1619 		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
1620 			   desc->type, desc->trans_id);
1621 		break;
1622 	}
1623 
1624 	return 0;
1625 }
1626 
1627 static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
1628 {
1629 	struct vmbus_channel *primary = channel->primary_channel;
1630 
1631 	return primary ? primary->device_obj : channel->device_obj;
1632 }
1633 
1634 /* Network processing softirq
1635  * Process data in the incoming ring buffer from the host.
1636  * Stops when the ring is empty or the budget is met or exceeded.
1637  */
1638 int netvsc_poll(struct napi_struct *napi, int budget)
1639 {
1640 	struct netvsc_channel *nvchan
1641 		= container_of(napi, struct netvsc_channel, napi);
1642 	struct netvsc_device *net_device = nvchan->net_device;
1643 	struct vmbus_channel *channel = nvchan->channel;
1644 	struct hv_device *device = netvsc_channel_to_device(channel);
1645 	struct net_device *ndev = hv_get_drvdata(device);
1646 	int work_done = 0;
1647 	int ret;
1648 
1649 	/* If starting a new interval */
1650 	if (!nvchan->desc)
1651 		nvchan->desc = hv_pkt_iter_first(channel);
1652 
1653 	nvchan->xdp_flush = false;
1654 
1655 	while (nvchan->desc && work_done < budget) {
1656 		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
1657 						    ndev, nvchan->desc, budget);
1658 		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
1659 	}
1660 
1661 	if (nvchan->xdp_flush)
1662 		xdp_do_flush();
1663 
1664 	/* Send any pending receive completions */
1665 	ret = send_recv_completions(ndev, net_device, nvchan);
1666 
1667 	/* If we did not exhaust the NAPI budget this time
1668 	 * and are not doing busy poll,
1669 	 * then re-enable host interrupts
1670 	 * and reschedule if the ring is not empty
1671 	 * or sending a receive completion failed.
1672 	 */
1673 	if (work_done < budget &&
1674 	    napi_complete_done(napi, work_done) &&
1675 	    (ret || hv_end_read(&channel->inbound)) &&
1676 	    napi_schedule_prep(napi)) {
1677 		hv_begin_read(&channel->inbound);
1678 		__napi_schedule(napi);
1679 	}
1680 
1681 	/* Driver may overshoot since there can be multiple packets per descriptor */
1682 	return min(work_done, budget);
1683 }
1684 
1685 /* Call back when data is available in host ring buffer.
1686  * Processing is deferred until network softirq (NAPI)
1687  */
1688 void netvsc_channel_cb(void *context)
1689 {
1690 	struct netvsc_channel *nvchan = context;
1691 	struct vmbus_channel *channel = nvchan->channel;
1692 	struct hv_ring_buffer_info *rbi = &channel->inbound;
1693 
1694 	/* preload first vmpacket descriptor */
1695 	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
1696 
1697 	if (napi_schedule_prep(&nvchan->napi)) {
1698 		/* disable interrupts from host */
1699 		hv_begin_read(rbi);
1700 
1701 		__napi_schedule_irqoff(&nvchan->napi);
1702 	}
1703 }
1704 
1705 /*
1706  * netvsc_device_add - Callback when the device belonging to this
1707  * driver is added
1708  */
1709 struct netvsc_device *netvsc_device_add(struct hv_device *device,
1710 				const struct netvsc_device_info *device_info)
1711 {
1712 	int i, ret = 0;
1713 	struct netvsc_device *net_device;
1714 	struct net_device *ndev = hv_get_drvdata(device);
1715 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
1716 
1717 	net_device = alloc_net_device();
1718 	if (!net_device)
1719 		return ERR_PTR(-ENOMEM);
1720 
1721 	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1722 		net_device_ctx->tx_table[i] = 0;
1723 
1724 	/* Because the device uses NAPI, all the interrupt batching and
1725 	 * control is done via the NET softirq, not the channel handling.
1726 	 */
1727 	set_channel_read_mode(device->channel, HV_CALL_ISR);
1728 
1729 	/* If we're reopening the device we may have multiple queues, fill the
1730 	 * chn_table with the default channel to use it before subchannels are
1731 	 * opened.
1732 	 * Initialize the channel state before we open;
1733 	 * we can be interrupted as soon as we open the channel.
1734 	 */
1735 
1736 	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
1737 		struct netvsc_channel *nvchan = &net_device->chan_table[i];
1738 
1739 		nvchan->channel = device->channel;
1740 		nvchan->net_device = net_device;
1741 		u64_stats_init(&nvchan->tx_stats.syncp);
1742 		u64_stats_init(&nvchan->rx_stats.syncp);
1743 
1744 		ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
1745 
1746 		if (ret) {
1747 			netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
1748 			goto cleanup2;
1749 		}
1750 
1751 		ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
1752 						 MEM_TYPE_PAGE_SHARED, NULL);
1753 
1754 		if (ret) {
1755 			netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
1756 			goto cleanup2;
1757 		}
1758 	}
1759 
1760 	/* Enable NAPI handler before init callbacks */
1761 	netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
1762 
1763 	/* Open the channel */
1764 	device->channel->next_request_id_callback = vmbus_next_request_id;
1765 	device->channel->request_addr_callback = vmbus_request_addr;
1766 	device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
1767 	device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;
1768 
1769 	ret = vmbus_open(device->channel, netvsc_ring_bytes,
1770 			 netvsc_ring_bytes,  NULL, 0,
1771 			 netvsc_channel_cb, net_device->chan_table);
1772 
1773 	if (ret != 0) {
1774 		netdev_err(ndev, "unable to open channel: %d\n", ret);
1775 		goto cleanup;
1776 	}
1777 
1778 	/* Channel is opened */
1779 	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
1780 
1781 	napi_enable(&net_device->chan_table[0].napi);
1782 
1783 	/* Connect with the NetVsp */
1784 	ret = netvsc_connect_vsp(device, net_device, device_info);
1785 	if (ret != 0) {
1786 		netdev_err(ndev,
1787 			"unable to connect to NetVSP - %d\n", ret);
1788 		goto close;
1789 	}
1790 
1791 	/* Writing the nvdev pointer unlocks netvsc_send(); make sure chn_table is
1792 	 * populated.
1793 	 */
1794 	rcu_assign_pointer(net_device_ctx->nvdev, net_device);
1795 
1796 	return net_device;
1797 
1798 close:
1799 	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
1800 	napi_disable(&net_device->chan_table[0].napi);
1801 
1802 	/* Now, we can close the channel safely */
1803 	vmbus_close(device->channel);
1804 
1805 cleanup:
1806 	netif_napi_del(&net_device->chan_table[0].napi);
1807 
1808 cleanup2:
1809 	free_netvsc_device(&net_device->rcu);
1810 
1811 	return ERR_PTR(ret);
1812 }
1813