// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>

#include <asm/sync_bitops.h>
#include <asm/mshyperv.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
int netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
	int ret, retry = 0;

	/* Block sending traffic to the VF if it's about to be removed */
	if (!vf)
		net_device_ctx->data_path_is_vf = vf;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

again:
	trace_nvsp_send(ndev, init_pkt);

	ret = vmbus_sendpacket(dev->channel, init_pkt,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	/* If we failed to switch to/from the VF, let data_path_is_vf stay
	 * false, so we use the synthetic path to send data.
	 */
	if (ret) {
		if (ret != -EAGAIN) {
			netdev_err(ndev,
				   "Unable to send sw datapath msg, err: %d\n",
				   ret);
			return ret;
		}

		if (retry++ < RETRY_MAX) {
			usleep_range(RETRY_US_LO, RETRY_US_HI);
			goto again;
		} else {
			netdev_err(
				ndev,
				"Retry failed to send sw datapath msg, err: %d\n",
				ret);
			return ret;
		}
	}

	wait_for_completion(&nv_dev->channel_init_wait);
	net_device_ctx->data_path_is_vf = vf;

	return 0;
}
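
/*
 * Illustrative usage sketch (not part of this file): the RNDIS layer
 * toggles the data path when a VF net_device is registered or removed,
 * roughly:
 *
 *	rtnl_lock();
 *	ret = netvsc_switch_datapath(ndev, true);   (synthetic -> VF)
 *	...
 *	rtnl_unlock();
 *
 * RTNL must be held because nvdev is read with rtnl_dereference()
 * above. The wait_for_completion() pairs with netvsc_send_completion(),
 * which completes channel_init_wait when the host acknowledges
 * NVSP_MSG4_TYPE_SWITCH_DATA_PATH.
 */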

/* Worker to set up sub-channels on initial setup.
 * The initial hotplug event occurs in softirq context
 * and can't wait for channels to be set up.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	/* Avoid deadlock with device removal already under RTNL */
	if (!rtnl_trylock()) {
		schedule_work(w);
		return;
	}

	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			/* fall back to the primary channel only */
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}

static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	net_device->tx_disable = true;

	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

	return net_device;
}

static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	kfree(nvdev->extension);
	vfree(nvdev->recv_buf);
	vfree(nvdev->send_buf);
	kfree(nvdev->send_section_map);

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
		kfree(nvdev->chan_table[i].recv_buf);
		vfree(nvdev->chan_table[i].mrc.slots);
	}

	kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}
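
/*
 * Reader-side sketch (illustrative): the data path looks the device up
 * under RCU, which is why the free above must be deferred through
 * call_rcu() instead of being done immediately:
 *
 *	rcu_read_lock();
 *	nvdev = rcu_dereference(ndev_ctx->nvdev);
 *	if (nvdev && !nvdev->destroy)
 *		... use nvdev ...
 *	rcu_read_unlock();
 */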

static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (i.e. we sent a
	 * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
	 * to send a revoke msg here.
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.revoke_recv_buf.id =
			NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded,
		 * ignore it, since we cannot send on a rescinded
		 * channel. This allows us to clean up properly
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}
}

static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/* Deal with the send buffer we may have set up.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg); therefore, we need
	 * to send a revoke msg here.
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded,
		 * ignore it, since we cannot send on a rescinded
		 * channel. This allows us to clean up properly
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
}

static void netvsc_teardown_recv_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}
}

static void netvsc_teardown_send_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
}

int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	int node = cpu_to_node(nvchan->channel->target_cpu);
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
	nvchan->mrc.slots = vzalloc_node(size, node);
	if (!nvchan->mrc.slots)
		nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}
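
/*
 * Sizing note: recv_completion_cnt is set by netvsc_init_buf() to
 * recv_section_cnt + 1, since one slot of the completion ring is always
 * left empty to distinguish "full" from "empty". The vzalloc_node() /
 * vzalloc() pair above prefers memory on the node of the channel's
 * target CPU, but falls back to any node rather than failing outright.
 */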

static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	size_t map_words;
	int i, ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	/* Legacy hosts only allow a smaller receive buffer */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
		net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Ensure buffer will not overflow */
	if (net_device->recv_section_size < NETVSC_MTU_MIN ||
	    (u64)net_device->recv_section_size *
	    (u64)net_device->recv_section_cnt > (u64)buf_size) {
		netdev_err(ndev, "invalid recv_section_size %u\n",
			   net_device->recv_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
		if (nvchan->recv_buf == NULL) {
			ret = -ENOMEM;
			goto cleanup;
		}
	}

	/* Set up the receive completion ring.
	 * Add 1 to recv_section_cnt because at least one entry in a
	 * ring buffer must always be empty.
	 */
	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now setup the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete send buffer initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size =
		init_packet->msg.v1_msg.send_send_buf_complete.section_size;
	if (net_device->send_section_size < NETVSC_MTU_MIN) {
		netdev_err(ndev, "invalid send_section_size %u\n",
			   net_device->send_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		if (hv_is_isolation_supported())
			netdev_info(ndev, "SR-IOV not advertised to guests on hosts supporting isolation\n");
		else
			init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;

	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       VMBUS_RQST_ID_NO_RESPONSE,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}
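
/*
 * Hedged sketch of the request/response rendezvous used above
 * (illustrative): requests sent with
 * VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED are answered by the host,
 * netvsc_send_completion() copies the reply into channel_init_pkt and
 * completes channel_init_wait, and the sender then inspects the result:
 *
 *	ret = vmbus_sendpacket(..., (unsigned long)init_packet, ...);
 *	wait_for_completion(&net_device->channel_init_wait);
 *	status = init_packet->msg.init_msg.init_complete.status;
 */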

static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	struct net_device *ndev = hv_get_drvdata(device);
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
		netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
			   net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_major_ver =
		(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_minor_ver =
		ndis_version & 0xFFFF;

	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       VMBUS_RQST_ID_NO_RESPONSE,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	/*
	 * Revoke receive buffer. If host is pre-Win2016 then tear down
	 * receive buffer GPADL. Do the same for send buffer.
	 */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/* Disable NAPI and disassociate its context from the device. */
	for (i = 0; i < net_device->num_chn; i++) {
		/* See also vmbus_reset_channel_cb(). */
		napi_disable(&net_device->chan_table[i].napi);
		netif_napi_del(&net_device->chan_table[i].napi);
	}

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/*
	 * If host is Win2016 or higher then we do the GPADL tear down
	 * here after VMBus is closed.
	 */
	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}

#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10
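
/*
 * Worked example (ring size hypothetical): with a 512 KiB outbound
 * ring, netvsc_send_pkt() stops the queue once less than ~51 KiB (10%)
 * is writable, and netvsc_send_tx_complete() wakes it only after more
 * than ~102 KiB (20%) is free again. The gap between the two
 * watermarks provides hysteresis so the queue does not flap under load.
 */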

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

static void netvsc_send_tx_complete(struct net_device *ndev,
				    struct netvsc_device *net_device,
				    struct vmbus_channel *channel,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct sk_buff *skb;
	u16 q_idx = 0;
	int queue_sends;
	u64 cmd_rqst;

	cmd_rqst = channel->request_addr_callback(channel, (u64)desc->trans_id);
	if (cmd_rqst == VMBUS_RQST_ERROR) {
		netdev_err(ndev, "Incorrect transaction id\n");
		return;
	}

	skb = (struct sk_buff *)(unsigned long)cmd_rqst;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
		    (hv_get_avail_to_write_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
		}
	}
}

static void netvsc_send_completion(struct net_device *ndev,
				   struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	const struct nvsp_message *nvsp_packet;
	u32 msglen = hv_pkt_datalen(desc);
	struct nvsp_message *pkt_rqst;
	u64 cmd_rqst;

	/* First check if this is a VMBUS completion without data payload */
	if (!msglen) {
		cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
								   (u64)desc->trans_id);
		if (cmd_rqst == VMBUS_RQST_ERROR) {
			netdev_err(ndev, "Invalid transaction id\n");
			return;
		}

		pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
		switch (pkt_rqst->hdr.msg_type) {
		case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
			complete(&net_device->channel_init_wait);
			break;

		default:
			netdev_err(ndev, "Unexpected VMBUS completion!!\n");
		}
		return;
	}

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
		return;
	}

	nvsp_packet = hv_pkt_data(desc);
	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_message_init_complete)) {
			netdev_err(ndev, "nvsp_msg length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG5_TYPE_SUBCHANNEL:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_5_subchannel_complete)) {
			netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
				   msglen);
			return;
		}
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
					desc, budget);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}
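
/*
 * Illustrative arithmetic (assuming 64-bit longs): with, say, 128 send
 * sections, netvsc_init_buf() sizes the map as DIV_ROUND_UP(128, 64) =
 * 2 unsigned longs. sync_test_and_set_bit() makes the claim atomic, so
 * if two queues race for the same clear bit only one wins; the loser
 * simply continues scanning for the next clear bit.
 */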

static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				    unsigned int section_index,
				    u32 pend_size,
				    struct hv_netvsc_packet *packet,
				    struct rndis_message *rndis_msg,
				    struct hv_page_buffer *pb,
				    bool xmit_more)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 padding = 0;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;
	u32 remain;

	/* Add padding */
	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
	if (xmit_more && remain) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
		u32 offset = pb[i].offset;
		u32 len = pb[i].len;

		memcpy(dest, (src + offset), len);
		dest += len;
	}

	if (padding)
		memset(dest, 0, padding);
}
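
/*
 * Padding example (assuming the default RNDIS alignment of 8 bytes):
 * a 1002-byte packet leaves remain = 1002 & 7 = 2, so padding =
 * 8 - 2 = 6 zero bytes are appended (and counted in msg_len and
 * total_data_buflen) when xmit_more says another packet will follow in
 * the same section. The last packet of a burst is never padded.
 */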

static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct nvsp_1_message_send_rndis_packet *rpkt =
		&nvmsg.msg.v1_msg.send_rndis_pkt;
	struct netvsc_channel * const nvchan =
		&net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);

	memset(&nvmsg, 0, sizeof(struct nvsp_message));
	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb)
		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
	else
		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */

	rpkt->send_buf_section_index = packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		rpkt->send_buf_section_size = 0;
	else
		rpkt->send_buf_section_size = packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	trace_nvsp_send_pkt(ndev, out_channel, rpkt);

	if (packet->page_buf_cnt) {
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  pb, packet->page_buf_cnt,
						  &nvmsg, sizeof(nvmsg),
						  req_id);
	} else {
		ret = vmbus_sendpacket(out_channel,
				       &nvmsg, sizeof(nvmsg),
				       req_id, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(txq);
			ndev_ctx->eth_stats.stop_queue++;
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		ndev_ctx->eth_stats.stop_queue++;
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	if (netif_tx_queue_stopped(txq) &&
	    atomic_read(&nvchan->queue_sends) < 1 &&
	    !net_device->tx_disable) {
		netif_tx_wake_queue(txq);
		ndev_ctx->eth_stats.wake_queue++;
		if (ret == -EAGAIN)
			ret = -ENOSPC;
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

/* RCU already held by caller */
/* Batching/bouncing logic attempts to optimize performance.
 *
 * For small, non-LSO packets we copy the packet to a send buffer
 * which is pre-registered with the Hyper-V side. This enables the
 * hypervisor to avoid remapping the aperture to access the packet
 * descriptor and data.
 *
 * If we already started using a buffer and the netdev is transmitting
 * a burst of packets, keep on copying into the buffer until it is
 * full or we are done collecting a burst. If there is an existing
 * buffer with space for the RNDIS descriptor but not the packet, copy
 * the RNDIS descriptor to the buffer, keeping the packet in place.
 *
 * If we do batching and send more than one packet using a single
 * NetVSC message, free the SKBs of the packets copied, except for the
 * last packet. This is done to streamline the handling of the case
 * where the last packet only had the RNDIS descriptor copied to the
 * send buffer, with the data pointers included in the NetVSC message.
 *
 * (A case-by-case summary appears after netvsc_send() below.)
 */
int netvsc_send(struct net_device *ndev,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer *pb,
		struct sk_buff *skb,
		bool xdp_tx)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rcu_dereference_bh(ndev_ctx->nvdev);
	struct hv_device *device = ndev_ctx->device_ctx;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch, xmit_more;

	/* If device is rescinded, return error and packet will get dropped. */
	if (unlikely(!net_device || net_device->destroy))
		return -ENODEV;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send a control message or XDP packet directly without accessing
	 * the msd (Multi-Send Data) field, which may be changed during
	 * data packet processing.
	 */
	if (!skb || xdp_tx)
		return netvsc_send_pkt(device, packet, net_device, pb, skb);

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
			++ndev_ctx->eth_stats.tx_send_full;
		} else {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	/* Keep aggregating only if the stack says more data is coming,
	 * we are not doing a mixed-mode (partial copy) send, and the
	 * transmit queue is not flow blocked.
	 */
	xmit_more = netdev_xmit_more() &&
		!packet->cp_partial &&
		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, xmit_more);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}
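
/*
 * Hedged summary of the batching decisions above (illustrative):
 *
 *	whole packet fits in the current section  -> copy it all, batch
 *	only the RNDIS descriptor fits            -> cp_partial: copy the
 *						     descriptor, keep data
 *						     pages in place
 *	packet alone fits in a fresh section      -> flush the old batch,
 *						     start a new one
 *	nothing fits in the send buffer           -> send via page buffers
 *						     without copying
 */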

/* Send pending recv completions */
static int send_recv_completions(struct net_device *ndev,
				 struct netvsc_device *nvdev,
				 struct netvsc_channel *nvchan)
{
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
		u32 status;
	} __packed;
	struct recv_comp_msg msg = {
		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
	};
	int ret;

	while (mrc->first != mrc->next) {
		const struct recv_comp_data *rcd
			= mrc->slots + mrc->first;

		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (unlikely(ret)) {
			struct net_device_context *ndev_ctx = netdev_priv(ndev);

			++ndev_ctx->eth_stats.rx_comp_busy;
			return ret;
		}

		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
	}

	/* receive completion ring has been emptied */
	if (unlikely(nvdev->destroy))
		wake_up(&nvdev->wait_drain);

	return 0;
}

/* Count how many receive completions are outstanding and how many slots remain */
static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
				 const struct multi_recv_comp *mrc,
				 u32 *filled, u32 *avail)
{
	u32 count = nvdev->recv_completion_cnt;

	if (mrc->next >= mrc->first)
		*filled = mrc->next - mrc->first;
	else
		*filled = (count - mrc->first) + mrc->next;

	*avail = count - *filled - 1;
}
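
/*
 * Worked example (values hypothetical): with recv_completion_cnt = 8,
 * first = 6 and next = 2, the ring holds filled = (8 - 6) + 2 = 4
 * completions and avail = 8 - 4 - 1 = 3 free slots; one slot is always
 * kept empty so that first == next unambiguously means "empty".
 */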

/* Add receive complete to ring to send to host. */
static void enq_receive_complete(struct net_device *ndev,
				 struct netvsc_device *nvdev, u16 q_idx,
				 u64 tid, u32 status)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_data *rcd;
	u32 filled, avail;

	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		send_recv_completions(ndev, nvdev, nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

	if (unlikely(!avail)) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, tid);
		return;
	}

	rcd = mrc->slots + mrc->next;
	rcd->tid = tid;
	rcd->status = status;

	if (++mrc->next == nvdev->recv_completion_cnt)
		mrc->next = 0;
}

static int netvsc_receive(struct net_device *ndev,
			  struct netvsc_device *net_device,
			  struct netvsc_channel *nvchan,
			  const struct vmpacket_descriptor *desc)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = nvchan->channel;
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	const struct nvsp_message *nvsp = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "invalid nvsp header, length too small: %u\n",
			  msglen);
		return 0;
	}

	/* Make sure this is a valid nvsp packet */
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return 0;
	}

	/* Validate xfer page pkt header */
	if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page pkt, offset too small: %u\n",
			  desc->offset8 << 3);
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return 0;
	}

	count = vmxferpage_packet->range_cnt;

	/* Check count for a valid value */
	if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Range count is not valid: %d\n",
			  count);
		return 0;
	}

	/* Each range represents one RNDIS pkt that contains one Ethernet frame */
	for (i = 0; i < count; i++) {
		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
		void *data;
		int ret;

		if (unlikely(offset > net_device->recv_buf_size ||
			     buflen > net_device->recv_buf_size - offset)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet offset:%u + len:%u too big\n",
				  offset, buflen);

			continue;
		}

		/* We're going to copy (sections of) the packet into nvchan->recv_buf;
		 * make sure that nvchan->recv_buf is large enough to hold the packet.
		 */
		if (unlikely(buflen > net_device->recv_section_size)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet too big: buflen=%u recv_section_size=%u\n",
				  buflen, net_device->recv_section_size);

			continue;
		}

		data = recv_buf + offset;

		nvchan->rsc.is_last = (i == count - 1);

		trace_rndis_recv(ndev, q_idx, data);

		/* Pass it to the upper layer */
		ret = rndis_filter_receive(ndev, net_device,
					   nvchan, data, buflen);

		if (unlikely(ret != NVSP_STAT_SUCCESS)) {
			/* Drop incomplete packet */
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
		}
	}

	enq_receive_complete(ndev, net_device, q_idx,
			     vmxferpage_packet->d.trans_id, status);

	return count;
}

static void netvsc_send_table(struct net_device *ndev,
			      struct netvsc_device *nvscdev,
			      const struct nvsp_message *nvmsg,
			      u32 msglen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	u32 count, offset, *tab;
	int i;

	/* Ensure packet is big enough to read send_table fields */
	if (msglen < sizeof(struct nvsp_message_header) +
		     sizeof(struct nvsp_5_send_indirect_table)) {
		netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
		return;
	}

	count = nvmsg->msg.v5_msg.send_table.count;
	offset = nvmsg->msg.v5_msg.send_table.offset;

	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	/* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
	 * wrong due to a host bug. So fix the offset here.
	 */
	if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
	    msglen >= sizeof(struct nvsp_message_header) +
	    sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
		offset = sizeof(struct nvsp_message_header) +
			 sizeof(union nvsp_6_message_uber);

	/* Boundary check for all versions */
	if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
		netdev_err(ndev, "Received send-table offset too big:%u\n",
			   offset);
		return;
	}

	tab = (void *)nvmsg + offset;

	for (i = 0; i < count; i++)
		net_device_ctx->tx_table[i] = tab[i];
}
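
/*
 * Usage sketch (illustrative, simplified from the transmit path): the
 * indirection table spreads flows across queues roughly as
 *
 *	q_idx = net_device_ctx->tx_table[skb_get_hash(skb) %
 *					 VRSS_SEND_TAB_SIZE];
 *
 * so the host can rebalance traffic at runtime just by sending an
 * updated NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE message.
 */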

static void netvsc_send_vf(struct net_device *ndev,
			   const struct nvsp_message *nvmsg,
			   u32 msglen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	/* Ensure packet is big enough to read its fields */
	if (msglen < sizeof(struct nvsp_message_header) +
		     sizeof(struct nvsp_4_send_vf_association)) {
		netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
		return;
	}

	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
	netdev_info(ndev, "VF slot %u %s\n",
		    net_device_ctx->vf_serial,
		    net_device_ctx->vf_alloc ? "added" : "removed");
}

static void netvsc_receive_inband(struct net_device *ndev,
				  struct netvsc_device *nvscdev,
				  const struct vmpacket_descriptor *desc)
{
	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
		return;
	}

	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		if (hv_is_isolation_supported())
			netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
		else
			netvsc_send_vf(ndev, nvmsg, msglen);
		break;
	}
}

static int netvsc_process_raw_pkt(struct hv_device *device,
				  struct netvsc_channel *nvchan,
				  struct netvsc_device *net_device,
				  struct net_device *ndev,
				  const struct vmpacket_descriptor *desc,
				  int budget)
{
	struct vmbus_channel *channel = nvchan->channel;
	const struct nvsp_message *nvmsg = hv_pkt_data(desc);

	trace_nvsp_recv(ndev, channel, nvmsg);

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(ndev, net_device, channel, desc, budget);
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		return netvsc_receive(ndev, net_device, nvchan, desc);

	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(ndev, net_device, desc);
		break;

	default:
		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
			   desc->type, desc->trans_id);
		break;
	}

	return 0;
}

static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
	struct vmbus_channel *primary = channel->primary_channel;

	return primary ? primary->device_obj : channel->device_obj;
}
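
/*
 * Note: only the primary channel carries the hv_device. For a
 * sub-channel the lookup above resolves to primary->device_obj; for
 * channel 0, primary_channel is NULL and channel->device_obj is used
 * directly.
 */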

/* Network processing softirq.
 * Process data in the incoming ring buffer from the host.
 * Stops when the ring is empty or the budget is met or exceeded.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct netvsc_device *net_device = nvchan->net_device;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	struct net_device *ndev = hv_get_drvdata(device);
	int work_done = 0;
	int ret;

	/* If starting a new interval */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
						    ndev, nvchan->desc, budget);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	/* Send any pending receive completions */
	ret = send_recv_completions(ndev, net_device, nvchan);

	/* If it did not exhaust the NAPI budget this time, and we are not
	 * busy polling, then re-enable host interrupts and reschedule if
	 * the ring is not empty or sending a receive completion failed.
	 */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    (ret || hv_end_read(&channel->inbound)) &&
	    napi_schedule_prep(napi)) {
		hv_begin_read(&channel->inbound);
		__napi_schedule(napi);
	}

	/* Driver may overshoot since multiple packets per descriptor */
	return min(work_done, budget);
}
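
/*
 * Budget note (illustrative): netvsc_receive() returns the number of
 * ranges in one transfer-page packet, so a single descriptor can add
 * more than one to work_done. The min() above keeps the value returned
 * to the NAPI core within its contract of never exceeding budget.
 */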

/* Callback that runs when data is available in the host ring buffer.
 * Processing is deferred until the network softirq (NAPI).
 */
void netvsc_channel_cb(void *context)
{
	struct netvsc_channel *nvchan = context;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/* preload first vmpacket descriptor */
	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	if (napi_schedule_prep(&nvchan->napi)) {
		/* disable interrupts from host */
		hv_begin_read(rbi);

		__napi_schedule_irqoff(&nvchan->napi);
	}
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
struct netvsc_device *netvsc_device_add(struct hv_device *device,
				const struct netvsc_device_info *device_info)
{
	int i, ret = 0;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device = alloc_net_device();
	if (!net_device)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		net_device_ctx->tx_table[i] = 0;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via the network softirq, not in the channel
	 * callback.
	 */
	set_channel_read_mode(device->channel, HV_CALL_ISR);

	/* If we're reopening the device, we may have multiple queues; fill
	 * the chn_table with the default channel so it can be used before
	 * the subchannels are opened.
	 * Initialize the channel state before we open; we can be
	 * interrupted as soon as we open the channel.
	 */

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;
		nvchan->net_device = net_device;
		u64_stats_init(&nvchan->tx_stats.syncp);
		u64_stats_init(&nvchan->rx_stats.syncp);

		ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);

		if (ret) {
			netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
			goto cleanup2;
		}

		ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);

		if (ret) {
			netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
			goto cleanup2;
		}
	}

	/* Enable NAPI handler before init callbacks */
	netif_napi_add(ndev, &net_device->chan_table[0].napi,
		       netvsc_poll, NAPI_POLL_WEIGHT);

	/* Open the channel */
	device->channel->next_request_id_callback = vmbus_next_request_id;
	device->channel->request_addr_callback = vmbus_request_addr;
	device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
	device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;

	ret = vmbus_open(device->channel, netvsc_ring_bytes,
			 netvsc_ring_bytes, NULL, 0,
			 netvsc_channel_cb, net_device->chan_table);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

	napi_enable(&net_device->chan_table[0].napi);

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device, net_device, device_info);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	/* Writing the nvdev pointer unblocks netvsc_send(), so make sure
	 * chn_table is populated first.
	 */
	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	return net_device;

close:
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
	napi_disable(&net_device->chan_table[0].napi);

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	netif_napi_del(&net_device->chan_table[0].napi);

cleanup2:
	free_netvsc_device(&net_device->rcu);

	return ERR_PTR(ret);
}
1701