// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>

#include <asm/sync_bitops.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	trace_nvsp_send(ndev, init_pkt);

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);
}

/* Worker to set up subchannels on initial setup.
 * The initial hotplug event occurs in softirq context
 * and cannot wait for the channels to be set up.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	/* Avoid deadlock with device removal already under RTNL */
	if (!rtnl_trylock()) {
		schedule_work(w);
		return;
	}

	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			/* fall back to the primary channel only */
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}

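/* Allocate and initialize a netvsc_device. The device starts with
 * transmit disabled and the default RNDIS packet count and alignment;
 * the completion, wait queues and subchannel worker are set up here
 * so they are ready before the channel is opened.
 */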
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	net_device->tx_disable = true;

	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

	return net_device;
}

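/* RCU callback that frees everything owned by a netvsc_device: the
 * RNDIS extension, the receive and send buffers, the send-section
 * bitmap and the per-channel receive completion slots.
 */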
static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	kfree(nvdev->extension);
	vfree(nvdev->recv_buf);
	vfree(nvdev->send_buf);
	kfree(nvdev->send_section_map);

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
		vfree(nvdev->chan_table[i].mrc.slots);
	}

	kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}

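/* Ask the host to revoke the receive buffer, if it was handed over. */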
static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (i.e. we sent a
	 * NvspMessage1TypeSendReceiveBuffer msg); therefore we need
	 * to send a revoke msg here.
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.revoke_recv_buf.id =
			NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded,
		 * ignore it: we cannot send on a rescinded channel.
		 * This allows us to clean up properly even when the
		 * channel has been rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and risk a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to send revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}
}

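/* Ask the host to revoke the send buffer, if it was handed over. */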
static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/* Deal with the send buffer we may have set up.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg); therefore we need
	 * to send a revoke msg here.
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded,
		 * ignore it: we cannot send on a rescinded channel.
		 * This allows us to clean up properly even when the
		 * channel has been rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and risk a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to send revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
}

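/* Tear down the GPADL mapping of the receive buffer, if established. */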
static void netvsc_teardown_recv_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and risk a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}
}

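/* Tear down the GPADL mapping of the send buffer, if established. */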
static void netvsc_teardown_send_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and risk a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
}

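/* Allocate the receive completion ring for a channel. One slot is
 * needed per outstanding completion; the allocation is tried first on
 * the NUMA node of the channel's target CPU and falls back to any node.
 */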
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	int node = cpu_to_node(nvchan->channel->target_cpu);
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
	nvchan->mrc.slots = vzalloc_node(size, node);
	if (!nvchan->mrc.slots)
		nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}

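/* Allocate the receive and send buffers, establish a GPADL for each on
 * the primary channel, hand them to the host with SEND_RECV_BUF and
 * SEND_SEND_BUF messages, and parse the section layout from the host's
 * completion responses.
 */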
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	size_t map_words;
	int ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	/* Legacy hosts only allow a smaller receive buffer */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
		net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Set up the receive completion ring.
	 * Add 1 to recv_section_cnt because at least one entry in a
	 * ring buffer has to be empty.
	 */
	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now set up the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete send buffer initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size =
		init_packet->msg.v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Set up state for managing the send buffer. */
	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;

	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

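/* Negotiate the highest NVSP version both ends support, tell the host
 * which NDIS version will be used, then set up the receive and send
 * buffers.
 */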
static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	struct net_device *ndev = hv_get_drvdata(device);
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_major_ver =
		(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_minor_ver =
		ndis_version & 0xFFFF;

	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	/*
	 * Revoke receive buffer. If host is pre-Win2016 then tear down
	 * receive buffer GPADL. Do the same for send buffer.
	 */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/* Disable NAPI and disassociate its context from the device. */
	for (i = 0; i < net_device->num_chn; i++) {
		/* See also vmbus_reset_channel_cb(). */
		napi_disable(&net_device->chan_table[i].napi);
		netif_napi_del(&net_device->chan_table[i].napi);
	}

	/*
	 * At this point, no one should be accessing net_device
	 * except in here.
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/*
	 * If host is Win2016 or higher then we do the GPADL tear down
	 * here after VMBus is closed.
	 */
	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}

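/* Transmit queues are stopped when the available outbound ring space
 * drops below RING_AVAIL_PERCENT_LOWATER and woken once it rises above
 * RING_AVAIL_PERCENT_HIWATER (or all in-flight sends have completed).
 */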
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

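/* Handle the completion of an RNDIS data packet: free its send-buffer
 * slot, account transmit statistics, release the skb and wake the
 * transmit queue if it was stopped and ring space is available again.
 */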
static void netvsc_send_tx_complete(struct net_device *ndev,
				    struct netvsc_device *net_device,
				    struct vmbus_channel *channel,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
		    (hv_get_avail_to_write_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
		}
	}
}

static void netvsc_send_completion(struct net_device *ndev,
				   struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
	case NVSP_MSG5_TYPE_SUBCHANNEL:
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
					desc, budget);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

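/* Claim a free section in the send buffer by scanning its bitmap;
 * returns NETVSC_INVALID_INDEX when all sections are in use.
 */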
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}

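/* Copy the packet's page fragments into the chosen send-buffer section
 * at the current batching offset, padding the tail to the RNDIS packet
 * alignment when more packets are expected to follow in this section.
 */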
static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				    unsigned int section_index,
				    u32 pend_size,
				    struct hv_netvsc_packet *packet,
				    struct rndis_message *rndis_msg,
				    struct hv_page_buffer *pb,
				    bool xmit_more)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 padding = 0;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;
	u32 remain;

	/* Add padding */
	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
	if (xmit_more && remain) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
		u32 offset = pb[i].offset;
		u32 len = pb[i].len;

		memcpy(dest, (src + offset), len);
		dest += len;
	}

	if (padding)
		memset(dest, 0, padding);
}

static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct nvsp_1_message_send_rndis_packet *rpkt =
		&nvmsg.msg.v1_msg.send_rndis_pkt;
	struct netvsc_channel * const nvchan =
		&net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb)
		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
	else
		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */

	rpkt->send_buf_section_index = packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		rpkt->send_buf_section_size = 0;
	else
		rpkt->send_buf_section_size = packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	trace_nvsp_send_pkt(ndev, out_channel, rpkt);

	if (packet->page_buf_cnt) {
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  pb, packet->page_buf_cnt,
						  &nvmsg, sizeof(nvmsg),
						  req_id);
	} else {
		ret = vmbus_sendpacket(out_channel,
				       &nvmsg, sizeof(nvmsg),
				       req_id, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(txq);
			ndev_ctx->eth_stats.stop_queue++;
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		ndev_ctx->eth_stats.stop_queue++;
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	if (netif_tx_queue_stopped(txq) &&
	    atomic_read(&nvchan->queue_sends) < 1 &&
	    !net_device->tx_disable) {
		netif_tx_wake_queue(txq);
		ndev_ctx->eth_stats.wake_queue++;
		if (ret == -EAGAIN)
			ret = -ENOSPC;
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

/* RCU already held by caller */
int netvsc_send(struct net_device *ndev,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer *pb,
		struct sk_buff *skb,
		bool xdp_tx)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rcu_dereference_bh(ndev_ctx->nvdev);
	struct hv_device *device = ndev_ctx->device_ctx;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch, xmit_more;

	/* If device is rescinded, return error and packet will get dropped. */
	if (unlikely(!net_device || net_device->destroy))
		return -ENODEV;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send a control message or XDP packet directly without accessing
	 * the msd (Multi-Send Data) field which may be changed during data
	 * packet processing.
	 */
	if (!skb || xdp_tx)
		return netvsc_send_pkt(device, packet, net_device, pb, skb);

	/* Batch packets in the send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
			++ndev_ctx->eth_stats.tx_send_full;
		} else {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	/* Keep aggregating only if the stack says more data is coming,
	 * we are not doing a mixed-mode send, and the queue is not
	 * flow blocked.
	 */
	xmit_more = netdev_xmit_more() &&
		!packet->cp_partial &&
		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, xmit_more);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

/* Send pending recv completions */
static int send_recv_completions(struct net_device *ndev,
				 struct netvsc_device *nvdev,
				 struct netvsc_channel *nvchan)
{
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
		u32 status;
	} __packed;
	struct recv_comp_msg msg = {
		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
	};
	int ret;

	while (mrc->first != mrc->next) {
		const struct recv_comp_data *rcd
			= mrc->slots + mrc->first;

		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (unlikely(ret)) {
			struct net_device_context *ndev_ctx = netdev_priv(ndev);

			++ndev_ctx->eth_stats.rx_comp_busy;
			return ret;
		}

		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
	}

	/* receive completion ring has been emptied */
	if (unlikely(nvdev->destroy))
		wake_up(&nvdev->wait_drain);

	return 0;
}

/* Count how many receive completions are outstanding */
static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
				 const struct multi_recv_comp *mrc,
				 u32 *filled, u32 *avail)
{
	u32 count = nvdev->recv_completion_cnt;

	if (mrc->next >= mrc->first)
		*filled = mrc->next - mrc->first;
	else
		*filled = (count - mrc->first) + mrc->next;

	*avail = count - *filled - 1;
}

/* Add receive complete to ring to send to host. */
static void enq_receive_complete(struct net_device *ndev,
				 struct netvsc_device *nvdev, u16 q_idx,
				 u64 tid, u32 status)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_data *rcd;
	u32 filled, avail;

	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		send_recv_completions(ndev, nvdev, nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

	if (unlikely(!avail)) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, tid);
		return;
	}

	rcd = mrc->slots + mrc->next;
	rcd->tid = tid;
	rcd->status = status;

	if (++mrc->next == nvdev->recv_completion_cnt)
		mrc->next = 0;
}

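/* Process one transfer-page packet from the host: validate it, pass
 * each contained RNDIS packet to the RNDIS filter, then queue a
 * receive completion for the host. Returns the number of RNDIS
 * packets processed.
 */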
static int netvsc_receive(struct net_device *ndev,
			  struct netvsc_device *net_device,
			  struct netvsc_channel *nvchan,
			  const struct vmpacket_descriptor *desc,
			  const struct nvsp_message *nvsp)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = nvchan->channel;
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;

	/* Make sure this is a valid nvsp packet */
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return 0;
	}

	count = vmxferpage_packet->range_cnt;

	/* Each range represents one RNDIS pkt that contains one Ethernet frame */
	for (i = 0; i < count; i++) {
		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
		void *data;
		int ret;

		if (unlikely(offset + buflen > net_device->recv_buf_size)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet offset:%u + len:%u too big\n",
				  offset, buflen);

			continue;
		}

		data = recv_buf + offset;

		nvchan->rsc.is_last = (i == count - 1);

		trace_rndis_recv(ndev, q_idx, data);

		/* Pass it to the upper layer */
		ret = rndis_filter_receive(ndev, net_device,
					   nvchan, data, buflen);

		if (unlikely(ret != NVSP_STAT_SUCCESS))
			status = NVSP_STAT_FAIL;
	}

	enq_receive_complete(ndev, net_device, q_idx,
			     vmxferpage_packet->d.trans_id, status);

	return count;
}

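/* Parse the VRSS send indirection table from the host and cache it in
 * the net_device_context, correcting the offset on NVSP 6 and earlier
 * where a host bug may report it wrongly.
 */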
static void netvsc_send_table(struct net_device *ndev,
			      struct netvsc_device *nvscdev,
			      const struct nvsp_message *nvmsg,
			      u32 msglen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	u32 count, offset, *tab;
	int i;

	count = nvmsg->msg.v5_msg.send_table.count;
	offset = nvmsg->msg.v5_msg.send_table.offset;

	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	/* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
	 * wrong due to a host bug. So fix the offset here.
	 */
	if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
	    msglen >= sizeof(struct nvsp_message_header) +
	    sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
		offset = sizeof(struct nvsp_message_header) +
			 sizeof(union nvsp_6_message_uber);

	/* Boundary check for all versions */
	if (offset > msglen - count * sizeof(u32)) {
		netdev_err(ndev, "Received send-table offset too big:%u\n",
			   offset);
		return;
	}

	tab = (void *)nvmsg + offset;

	for (i = 0; i < count; i++)
		net_device_ctx->tx_table[i] = tab[i];
}

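/* Record the VF association (slot allocation and serial number)
 * reported by the host.
 */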
static void netvsc_send_vf(struct net_device *ndev,
			   const struct nvsp_message *nvmsg)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
	netdev_info(ndev, "VF slot %u %s\n",
		    net_device_ctx->vf_serial,
		    net_device_ctx->vf_alloc ? "added" : "removed");
}

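/* Dispatch inband NVSP control messages (send indirection table,
 * VF association).
 */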
static void netvsc_receive_inband(struct net_device *ndev,
				  struct netvsc_device *nvscdev,
				  const struct nvsp_message *nvmsg,
				  u32 msglen)
{
	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(ndev, nvmsg);
		break;
	}
}

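/* Demultiplex one vmpacket descriptor from the ring: completions,
 * transfer-page data packets and inband messages. Returns the number
 * of data packets processed.
 */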
static int netvsc_process_raw_pkt(struct hv_device *device,
				  struct netvsc_channel *nvchan,
				  struct netvsc_device *net_device,
				  struct net_device *ndev,
				  const struct vmpacket_descriptor *desc,
				  int budget)
{
	struct vmbus_channel *channel = nvchan->channel;
	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);

	trace_nvsp_recv(ndev, channel, nvmsg);

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(ndev, net_device, channel,
				       desc, budget);
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		return netvsc_receive(ndev, net_device, nvchan,
				      desc, nvmsg);

	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(ndev, net_device, nvmsg, msglen);
		break;

	default:
		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
			   desc->type, desc->trans_id);
		break;
	}

	return 0;
}

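/* Map a channel (primary or subchannel) back to its owning hv_device. */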
static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
	struct vmbus_channel *primary = channel->primary_channel;

	return primary ? primary->device_obj : channel->device_obj;
}

/* Network processing softirq.
 * Processes data in the incoming ring buffer from the host.
 * Stops when the ring is empty or the budget is met or exceeded.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct netvsc_device *net_device = nvchan->net_device;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	struct net_device *ndev = hv_get_drvdata(device);
	int work_done = 0;
	int ret;

	/* If starting a new interval */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
						    ndev, nvchan->desc, budget);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	/* Send any pending receive completions */
	ret = send_recv_completions(ndev, net_device, nvchan);

	/* If we did not exhaust the NAPI budget this time and we are not
	 * busy polling, then re-enable host interrupts and reschedule if
	 * the ring is not empty or sending a receive completion failed.
	 */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    (ret || hv_end_read(&channel->inbound)) &&
	    napi_schedule_prep(napi)) {
		hv_begin_read(&channel->inbound);
		__napi_schedule(napi);
	}

	/* Driver may overshoot since multiple packets per descriptor */
	return min(work_done, budget);
}

/* Callback when data is available in the host ring buffer.
 * Processing is deferred until network softirq (NAPI).
 */
void netvsc_channel_cb(void *context)
{
	struct netvsc_channel *nvchan = context;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/* preload first vmpacket descriptor */
	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	if (napi_schedule_prep(&nvchan->napi)) {
		/* disable interrupts from host */
		hv_begin_read(rbi);

		__napi_schedule_irqoff(&nvchan->napi);
	}
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
struct netvsc_device *netvsc_device_add(struct hv_device *device,
				const struct netvsc_device_info *device_info)
{
	int i, ret = 0;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device = alloc_net_device();
	if (!net_device)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		net_device_ctx->tx_table[i] = 0;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling.
	 */
	set_channel_read_mode(device->channel, HV_CALL_ISR);

	/* If we're reopening the device we may have multiple queues, fill the
	 * chn_table with the default channel to use it before subchannels are
	 * opened.
	 * Initialize the channel state before we open;
	 * we can be interrupted as soon as we open the channel.
	 */

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;
		nvchan->net_device = net_device;
		u64_stats_init(&nvchan->tx_stats.syncp);
		u64_stats_init(&nvchan->rx_stats.syncp);

		ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i);

		if (ret) {
			netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
			goto cleanup2;
		}

		ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);

		if (ret) {
			netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
			goto cleanup2;
		}
	}

	/* Enable NAPI handler before init callbacks */
	netif_napi_add(ndev, &net_device->chan_table[0].napi,
		       netvsc_poll, NAPI_POLL_WEIGHT);

	/* Open the channel */
	ret = vmbus_open(device->channel, netvsc_ring_bytes,
			 netvsc_ring_bytes, NULL, 0,
			 netvsc_channel_cb, net_device->chan_table);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

	napi_enable(&net_device->chan_table[0].napi);

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device, net_device, device_info);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	/* Writing the nvdev pointer unlocks netvsc_send(); make sure
	 * chn_table is populated first.
	 */
	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	return net_device;

close:
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
	napi_disable(&net_device->chan_table[0].napi);

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	netif_napi_del(&net_device->chan_table[0].napi);

cleanup2:
	free_netvsc_device(&net_device->rcu);

	return ERR_PTR(ret);
}