xref: /openbmc/linux/drivers/net/hyperv/netvsc_drv.c (revision 15d90a6a)
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN	64
#define RETRY_US_LO	5000
#define RETRY_US_HI	10000
#define RETRY_MAX	2000	/* >10 sec */

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}

static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netif_tx_wake_all_queues(net);
	}

	if (vf_netdev) {
		/* Setting the synthetic device up transparently sets the
		 * slave as up. If open fails, the slave will still be
		 * offline (and not used).
		 */
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

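/* Poll every channel until both its inbound and outbound ring buffers
 * are empty; give up with -ETIMEDOUT after RETRY_MAX sleeps (>10 sec).
 */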
static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive is not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netif_tx_disable(net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

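/* Append a per-packet info (PPI) record to the RNDIS packet message and
 * return a pointer to the record's data area (just past the PPI header).
 */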
static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->internal = 0;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
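/* For example (assuming an interface named eth0), UDP L4 hashing can be
 * toggled from user space with:
 *   ethtool -N eth0 rx-flow-hash udp4 sdfn   # hash on addresses + ports
 *   ethtool -N eth0 rx-flow-hash udp4 sd     # hash on addresses only
 */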
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;

		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;

		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
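		/* L4 hashing is disabled for this flow type: fall back
		 * to an L3 hash over the IP addresses only.
		 */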
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}

static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       struct net_device *sb_dev,
			       select_queue_fallback_t fallback)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
						       sb_dev, fallback);
		else
			txq = fallback(vf_netdev, skb, NULL);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

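	/* The selected queue (possibly chosen by the VF) can exceed the
	 * synthetic device's queue count; fold it back into range.
	 */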
	while (unlikely(txq >= ndev->real_num_tx_queues))
		txq -= ndev->real_num_tx_queues;

	return txq;
}

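/* Describe the byte range [offset, offset + len) of @page as a series
 * of hv_page_buffer entries in @pb; returns the number of entries used.
 */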
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				offset_in_page(data),
				skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					frag->page_offset,
					skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

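/* Count the page buffer slots needed for the skb's linear data plus
 * all of its fragments.
 */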
static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* if VF is present and up then redirect packets
	 * already called with rcu_read_lock_bh
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If the skb is scattered across
	 * more pages than that, we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb_vlan_tag_get_id(skb);
		vlan->cfi = skb_vlan_tag_get_cfi(skb);
		vlan->pri = skb_vlan_tag_get_prio(skb);
	}

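	/* For TSO, the host expects the TCP checksum field pre-seeded
	 * with the pseudo-header checksum (computed with a zero length),
	 * which is what the csum_tcpudp_magic/csum_ipv6_magic calls
	 * below set up.
	 */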
	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

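		/* The host reports link speed in units of 100 bps;
		 * dividing by 10000 yields Mbps for ethtool reporting.
		 */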
		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct netvsc_channel *nvchan)
{
	struct napi_struct *napi = &nvchan->napi;
	const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
	const struct ndis_tcp_ip_checksum_info *csum_info =
						nvchan->rsc.csum_info;
	struct sk_buff *skb;
	int i;

	skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed
	 * to by hv_netvsc_packet cannot be deallocated.
	 */
	for (i = 0; i < nvchan->rsc.cnt; i++)
		skb_put_data(skb, nvchan->rsc.data[i], nvchan->rsc.len[i]);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
			(vlan->cfi ? VLAN_CFI_MASK : 0);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct netvsc_device *net_device,
			 struct netvsc_channel *nvchan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct vmbus_channel *channel = nvchan->channel;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, nvchan);

	if (unlikely(!skb)) {
		++net_device_ctx->eth_stats.rx_no_memory;
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += nvchan->rsc.pktlen;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);
	return NVSP_STAT_SUCCESS;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

/* Alloc struct netvsc_device_info, and initialize it from either existing
 * struct netvsc_device, or from default values.
 */
static struct netvsc_device_info *netvsc_devinfo_get
			(struct netvsc_device *nvdev)
{
	struct netvsc_device_info *dev_info;

	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);

	if (!dev_info)
		return NULL;

	if (nvdev) {
		dev_info->num_chn = nvdev->num_chn;
		dev_info->send_sections = nvdev->send_section_cnt;
		dev_info->send_section_size = nvdev->send_section_size;
		dev_info->recv_sections = nvdev->recv_section_cnt;
		dev_info->recv_section_size = nvdev->recv_section_size;

		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
		       NETVSC_HASH_KEYLEN);
	} else {
		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
		dev_info->send_sections = NETVSC_DEFAULT_TX;
		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
		dev_info->recv_sections = NETVSC_DEFAULT_RX;
		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
	}

	return dev_info;
}

static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* Don't continue trying to set up sub-channels */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	/* If device was up (receiving) then shutdown */
	if (netif_running(ndev)) {
		netif_tx_disable(ndev);

		ret = rndis_filter_close(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "unable to close device (ret %d).\n", ret);
			return ret;
		}

		ret = netvsc_wait_until_empty(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "Ring buffer not empty after closing rndis\n");
			return ret;
		}
	}

	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return 0;
}

static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev, dev_info);

		/* if unavailable, just proceed with one queue */
		if (ret) {
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	/* In any case device is now ready */
	netif_device_attach(ndev);

	/* Note: enable and attach happen when sub-channels are set up */
	netif_carrier_off(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			return ret;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info *device_info;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->num_chn = count;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(net, device_info);
	if (ret) {
		device_info->num_chn = orig;
		if (netvsc_attach(net, device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

out:
	kfree(device_info);
	return ret;
}

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->l4_hash = HV_DEFAULT_L4HASH;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;

	dev->features = NETIF_F_LRO;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info *device_info;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			goto out;
	}

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto rollback_vf;

	ndev->mtu = mtu;

	ret = netvsc_attach(ndev, device_info);
	if (!ret)
		goto out;

	/* Attempt rollback to original MTU */
	ndev->mtu = orig_mtu;

	if (netvsc_attach(ndev, device_info))
		netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
	if (vf_netdev)
		dev_set_mtu(vf_netdev, orig_mtu);

out:
	kfree(device_info);
	return ret;
}

static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

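		/* u64_stats retry loop: re-read the counters if a
		 * writer updated them while we were sampling.
		 */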
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}

static void netvsc_get_pcpu_stats(struct net_device *net,
				  struct netvsc_ethtool_pcpu_stats *pcpu_tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	int i;

	/* fetch percpu stats of vf */
	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats =
			per_cpu_ptr(ndev_ctx->vf_stats, i);
		struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			this_tot->vf_rx_packets = stats->rx_packets;
			this_tot->vf_tx_packets = stats->tx_packets;
			this_tot->vf_rx_bytes = stats->rx_bytes;
			this_tot->vf_tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		this_tot->rx_packets = this_tot->vf_rx_packets;
		this_tot->tx_packets = this_tot->vf_tx_packets;
		this_tot->rx_bytes   = this_tot->vf_rx_bytes;
		this_tot->tx_bytes   = this_tot->vf_tx_bytes;
	}

	/* fetch percpu stats of netvsc */
	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		struct netvsc_ethtool_pcpu_stats *this_tot =
			&pcpu_tot[nvchan->channel->target_cpu];
		u64 packets, bytes;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->tx_bytes	+= bytes;
		this_tot->tx_packets	+= packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->rx_bytes	+= bytes;
		this_tot->rx_packets	+= packets;
	}
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	if (!nvdev)
		return;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes   += vf_tot.rx_bytes;
	t->tx_bytes   += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes	+= bytes;
		t->tx_packets	+= packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes	+= bytes;
		t->rx_packets	+= packets;
		t->multicast	+= multicast;
	}
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr, NULL);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr, NULL);
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, pcpu_stats[] = {
	{ "cpu%u_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
	{ "cpu%u_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
	{ "cpu%u_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
	{ "cpu%u_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
	{ "cpu%u_vf_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
	{ "cpu%u_vf_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
	{ "cpu%u_vf_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
	{ "cpu%u_vf_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* statistics per CPU (rx/tx packets/bytes, including VF traffic) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN
			+ NETVSC_VF_STATS_LEN
			+ NETVSC_QUEUE_STATS_LEN(nvdev)
			+ NETVSC_PCPU_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	struct netvsc_vf_pcpu_stats sum;
	struct netvsc_ethtool_pcpu_stats *pcpu_sum;
	unsigned int start;
	u64 packets, bytes;
	int i, j, cpu;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	netvsc_get_vf_stats(dev, &sum);
	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}

	pcpu_sum = kvmalloc_array(num_possible_cpus(),
				  sizeof(struct netvsc_ethtool_pcpu_stats),
				  GFP_KERNEL);
	if (!pcpu_sum)
		return;

	netvsc_get_pcpu_stats(dev, pcpu_sum);
	for_each_present_cpu(cpu) {
		struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];

		for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
			data[i++] = *(u64 *)((void *)this_sum
					     + pcpu_stats[j].offset);
	}
	kvfree(pcpu_sum);
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	u8 *p = data;
	int i, cpu;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		for_each_present_cpu(cpu) {
			for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
				sprintf(p, pcpu_stats[i].name, cpu);
				p += ETH_GSTRING_LEN;
			}
		}

		break;
	}
}

static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
			 struct ethtool_rxnfc *info)
{
	const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;

	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
		if (ndc->l4_hash & HV_TCP4_L4HASH)
			info->data |= l4_flag;

		break;

	case TCP_V6_FLOW:
		if (ndc->l4_hash & HV_TCP6_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V4_FLOW:
		if (ndc->l4_hash & HV_UDP4_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V6_FLOW:
		if (ndc->l4_hash & HV_UDP6_L4HASH)
			info->data |= l4_flag;

		break;

	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(ndc, info);
	}
	return -EOPNOTSUPP;
}

static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
				    struct ethtool_rxnfc *info)
{
	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash |= HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash |= HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash |= HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash |= HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash &= ~HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash &= ~HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash &= ~HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash &= ~HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	return -EOPNOTSUPP;
}

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
	struct net_device_context *ndc = netdev_priv(ndev);

	if (info->cmd == ETHTOOL_SRXFH)
		return netvsc_set_rss_hash_opts(ndc, info);

	return -EOPNOTSUPP;
}

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = rndis_dev->rx_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= ndev->num_chn)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			rndis_dev->rx_table[i] = indir[i];
	}

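	/* The host programs the indirection table and key together, so
	 * when only the table changed, resend the current key.
	 */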
	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key);
}

/* The Hyper-V RNDIS protocol does not have a ring in the HW sense.
 * It does have a pre-allocated receive area, which is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}

static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}

static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct netvsc_device_info *device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->send_sections = new_tx;
	device_info->recv_sections = new_rx;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(ndev, device_info);
	if (ret) {
		device_info->send_sections = orig.tx_pending;
		device_info->recv_sections = orig.rx_pending;

		if (netvsc_attach(ndev, device_info))
			netdev_err(ndev, "restoring ringparam failed\n");
	}

out:
	kfree(device_info);
	return ret;
}

static int netvsc_set_features(struct net_device *ndev,
			       netdev_features_t features)
{
	netdev_features_t change = features ^ ndev->features;
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct ndis_offload_params offloads;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (!(change & NETIF_F_LRO))
		return 0;

	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	if (features & NETIF_F_LRO) {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
	} else {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
	}

	return rndis_filter_set_offload_params(ndev, nvdev, &offloads);
}

static u32 netvsc_get_msglevel(struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	return ndev_ctx->msg_enable;
}

static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	ndev_ctx->msg_enable = val;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_msglevel	= netvsc_get_msglevel,
	.set_msglevel	= netvsc_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_rxnfc	= netvsc_get_rxnfc,
	.set_rxnfc	= netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam	= netvsc_get_ringparam,
	.set_ringparam	= netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_change_rx_flags =		netvsc_change_rx_flags,
	.ndo_set_rx_mode =		netvsc_set_rx_mode,
	.ndo_set_features =		netvsc_set_features,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate a link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present, send a GARP packet to network peers with netdev_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	/* if changes are happening, come back later */
1851 	if (!rtnl_trylock()) {
1852 		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
1853 		return;
1854 	}
1855 
1856 	net_device = rtnl_dereference(ndev_ctx->nvdev);
1857 	if (!net_device)
1858 		goto out_unlock;
1859 
1860 	rdev = net_device->extension;
1861 
1862 	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
1863 	if (time_is_after_jiffies(next_reconfig)) {
1864 		/* link_watch only sends one notification with current state
1865 		 * per second, avoid doing reconfig more frequently. Handle
1866 		 * wrap around.
1867 		 */
1868 		delay = next_reconfig - jiffies;
1869 		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
1870 		schedule_delayed_work(&ndev_ctx->dwork, delay);
1871 		goto out_unlock;
1872 	}
1873 	ndev_ctx->last_reconfig = jiffies;
1874 
1875 	spin_lock_irqsave(&ndev_ctx->lock, flags);
1876 	if (!list_empty(&ndev_ctx->reconfig_events)) {
1877 		event = list_first_entry(&ndev_ctx->reconfig_events,
1878 					 struct netvsc_reconfig, list);
1879 		list_del(&event->list);
1880 		reschedule = !list_empty(&ndev_ctx->reconfig_events);
1881 	}
1882 	spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1883 
1884 	if (!event)
1885 		goto out_unlock;
1886 
1887 	switch (event->event) {
1888 		/* Only the following events are possible due to the check in
1889 		 * netvsc_linkstatus_callback()
1890 		 */
1891 	case RNDIS_STATUS_MEDIA_CONNECT:
1892 		if (rdev->link_state) {
1893 			rdev->link_state = false;
1894 			netif_carrier_on(net);
1895 			netif_tx_wake_all_queues(net);
1896 		} else {
1897 			notify = true;
1898 		}
1899 		kfree(event);
1900 		break;
1901 	case RNDIS_STATUS_MEDIA_DISCONNECT:
1902 		if (!rdev->link_state) {
1903 			rdev->link_state = true;
1904 			netif_carrier_off(net);
1905 			netif_tx_stop_all_queues(net);
1906 		}
1907 		kfree(event);
1908 		break;
1909 	case RNDIS_STATUS_NETWORK_CHANGE:
1910 		/* Only makes sense if carrier is present */
1911 		if (!rdev->link_state) {
1912 			rdev->link_state = true;
1913 			netif_carrier_off(net);
1914 			netif_tx_stop_all_queues(net);
1915 			event->event = RNDIS_STATUS_MEDIA_CONNECT;
1916 			spin_lock_irqsave(&ndev_ctx->lock, flags);
1917 			list_add(&event->list, &ndev_ctx->reconfig_events);
1918 			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1919 			reschedule = true;
1920 		}
1921 		break;
1922 	}
1923 
1924 	rtnl_unlock();
1925 
1926 	if (notify)
1927 		netdev_notify_peers(net);
1928 
1929 	/* link_watch only sends one notification with the current state per
1930 	 * second; handle the next reconfig event in 2 seconds.
1931 	 */
1932 	if (reschedule)
1933 		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
1934 
1935 	return;
1936 
1937 out_unlock:
1938 	rtnl_unlock();
1939 }
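
/* The events consumed above are produced by netvsc_linkstatus_callback()
 * elsewhere in this file: it queues a struct netvsc_reconfig on
 * ndev_ctx->reconfig_events under ndev_ctx->lock and schedules this
 * delayed work, so together with the last_reconfig check at most one
 * event is processed per LINKCHANGE_INT.
 */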
1940 
1941 static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
1942 {
1943 	struct net_device_context *net_device_ctx;
1944 	struct net_device *dev;
1945 
1946 	dev = netdev_master_upper_dev_get(vf_netdev);
1947 	if (!dev || dev->netdev_ops != &device_ops)
1948 		return NULL;	/* not a netvsc device */
1949 
1950 	net_device_ctx = netdev_priv(dev);
1951 	if (!rtnl_dereference(net_device_ctx->nvdev))
1952 		return NULL;	/* device is removed */
1953 
1954 	return dev;
1955 }
1956 
1957 /* Called when the VF is injecting data into the network stack.
1958  * Change the associated network device from VF to netvsc.
1959  * Note: already called with rcu_read_lock held.
1960  */
1961 static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
1962 {
1963 	struct sk_buff *skb = *pskb;
1964 	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
1965 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1966 	struct netvsc_vf_pcpu_stats *pcpu_stats
1967 		 = this_cpu_ptr(ndev_ctx->vf_stats);
1968 
1969 	skb->dev = ndev;
1970 
1971 	u64_stats_update_begin(&pcpu_stats->syncp);
1972 	pcpu_stats->rx_packets++;
1973 	pcpu_stats->rx_bytes += skb->len;
1974 	u64_stats_update_end(&pcpu_stats->syncp);
1975 
1976 	return RX_HANDLER_ANOTHER;
1977 }
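
/* Returning RX_HANDLER_ANOTHER makes the core receive path run again
 * with the updated skb->dev, so a frame that arrived on the VF is
 * delivered to the stack as if it came in on the synthetic device.
 */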
1978 
1979 static int netvsc_vf_join(struct net_device *vf_netdev,
1980 			  struct net_device *ndev)
1981 {
1982 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1983 	int ret;
1984 
1985 	ret = netdev_rx_handler_register(vf_netdev,
1986 					 netvsc_vf_handle_frame, ndev);
1987 	if (ret != 0) {
1988 		netdev_err(vf_netdev,
1989 			   "can not register netvsc VF receive handler (err = %d)\n",
1990 			   ret);
1991 		goto rx_handler_failed;
1992 	}
1993 
1994 	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
1995 					   NULL, NULL, NULL);
1996 	if (ret != 0) {
1997 		netdev_err(vf_netdev,
1998 			   "can not set master device %s (err = %d)\n",
1999 			   ndev->name, ret);
2000 		goto upper_link_failed;
2001 	}
2002 
2003 	/* set slave flag before open to prevent IPv6 addrconf */
2004 	vf_netdev->flags |= IFF_SLAVE;
2005 
2006 	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
2007 
2008 	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
2009 
2010 	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
2011 	return 0;
2012 
2013 upper_link_failed:
2014 	netdev_rx_handler_unregister(vf_netdev);
2015 rx_handler_failed:
2016 	return ret;
2017 }
2018 
2019 static void __netvsc_vf_setup(struct net_device *ndev,
2020 			      struct net_device *vf_netdev)
2021 {
2022 	int ret;
2023 
2024 	/* Align MTU of VF with master */
2025 	ret = dev_set_mtu(vf_netdev, ndev->mtu);
2026 	if (ret)
2027 		netdev_warn(vf_netdev,
2028 			    "unable to change mtu to %u\n", ndev->mtu);
2029 
2030 	/* set multicast and other flags on the VF */
2031 	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);
2032 
2033 	/* sync address list from ndev to VF */
2034 	netif_addr_lock_bh(ndev);
2035 	dev_uc_sync(vf_netdev, ndev);
2036 	dev_mc_sync(vf_netdev, ndev);
2037 	netif_addr_unlock_bh(ndev);
2038 
2039 	if (netif_running(ndev)) {
2040 		ret = dev_open(vf_netdev, NULL);
2041 		if (ret)
2042 			netdev_warn(vf_netdev,
2043 				    "unable to open: %d\n", ret);
2044 	}
2045 }
2046 
2047 /* Set up the VF as a slave of the synthetic device.
2048  * Runs in workqueue to avoid recursion in netlink callbacks.
2049  */
2050 static void netvsc_vf_setup(struct work_struct *w)
2051 {
2052 	struct net_device_context *ndev_ctx
2053 		= container_of(w, struct net_device_context, vf_takeover.work);
2054 	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
2055 	struct net_device *vf_netdev;
2056 
2057 	if (!rtnl_trylock()) {
2058 		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
2059 		return;
2060 	}
2061 
2062 	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2063 	if (vf_netdev)
2064 		__netvsc_vf_setup(ndev, vf_netdev);
2065 
2066 	rtnl_unlock();
2067 }
2068 
2069 /* Find netvsc by VF serial number.
2070  * The PCI hyperv controller records the serial number as the slot kobj name.
2071  */
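/* For example, a VF in a hypothetical slot named "2" parses to serial 2
 * and is matched against ndev_ctx->vf_serial below; kstrtou32() rejects
 * non-numeric slot names.
 */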
2072 static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
2073 {
2074 	struct device *parent = vf_netdev->dev.parent;
2075 	struct net_device_context *ndev_ctx;
2076 	struct pci_dev *pdev;
2077 	u32 serial;
2078 
2079 	if (!parent || !dev_is_pci(parent))
2080 		return NULL; /* not a PCI device */
2081 
2082 	pdev = to_pci_dev(parent);
2083 	if (!pdev->slot) {
2084 		netdev_notice(vf_netdev, "no PCI slot information\n");
2085 		return NULL;
2086 	}
2087 
2088 	if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
2089 		netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
2090 			      pci_slot_name(pdev->slot));
2091 		return NULL;
2092 	}
2093 
2094 	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
2095 		if (!ndev_ctx->vf_alloc)
2096 			continue;
2097 
2098 		if (ndev_ctx->vf_serial == serial)
2099 			return hv_get_drvdata(ndev_ctx->device_ctx);
2100 	}
2101 
2102 	netdev_notice(vf_netdev,
2103 		      "no netdev found for vf serial:%u\n", serial);
2104 	return NULL;
2105 }
2106 
2107 static int netvsc_register_vf(struct net_device *vf_netdev)
2108 {
2109 	struct net_device_context *net_device_ctx;
2110 	struct netvsc_device *netvsc_dev;
2111 	struct net_device *ndev;
2112 	int ret;
2113 
2114 	if (vf_netdev->addr_len != ETH_ALEN)
2115 		return NOTIFY_DONE;
2116 
2117 	ndev = get_netvsc_byslot(vf_netdev);
2118 	if (!ndev)
2119 		return NOTIFY_DONE;
2120 
2121 	net_device_ctx = netdev_priv(ndev);
2122 	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2123 	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
2124 		return NOTIFY_DONE;
2125 
2126 	/* if the synthetic interface is in a different namespace,
2127 	 * then move the VF to that namespace; join will be
2128 	 * done again in that context.
2129 	 */
2130 	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
2131 		ret = dev_change_net_namespace(vf_netdev,
2132 					       dev_net(ndev), "eth%d");
2133 		if (ret)
2134 			netdev_err(vf_netdev,
2135 				   "could not move to same namespace as %s: %d\n",
2136 				   ndev->name, ret);
2137 		else
2138 			netdev_info(vf_netdev,
2139 				    "VF moved to namespace with: %s\n",
2140 				    ndev->name);
2141 		return NOTIFY_DONE;
2142 	}
2143 
2144 	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
2145 
2146 	if (netvsc_vf_join(vf_netdev, ndev) != 0)
2147 		return NOTIFY_DONE;
2148 
2149 	dev_hold(vf_netdev);
2150 	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
2151 	return NOTIFY_OK;
2152 }
2153 
2154 /* VF up/down change detected, schedule to change data path */
2155 static int netvsc_vf_changed(struct net_device *vf_netdev)
2156 {
2157 	struct net_device_context *net_device_ctx;
2158 	struct netvsc_device *netvsc_dev;
2159 	struct net_device *ndev;
2160 	bool vf_is_up = netif_running(vf_netdev);
2161 
2162 	ndev = get_netvsc_byref(vf_netdev);
2163 	if (!ndev)
2164 		return NOTIFY_DONE;
2165 
2166 	net_device_ctx = netdev_priv(ndev);
2167 	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2168 	if (!netvsc_dev)
2169 		return NOTIFY_DONE;
2170 
2171 	netvsc_switch_datapath(ndev, vf_is_up);
2172 	netdev_info(ndev, "Data path switched %s VF: %s\n",
2173 		    vf_is_up ? "to" : "from", vf_netdev->name);
2174 
2175 	return NOTIFY_OK;
2176 }
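
/* netvsc_switch_datapath() asks the host to steer traffic to the VF or
 * back to the synthetic path. The VF stays enslaved either way; only
 * the data path moves.
 */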
2177 
2178 static int netvsc_unregister_vf(struct net_device *vf_netdev)
2179 {
2180 	struct net_device *ndev;
2181 	struct net_device_context *net_device_ctx;
2182 
2183 	ndev = get_netvsc_byref(vf_netdev);
2184 	if (!ndev)
2185 		return NOTIFY_DONE;
2186 
2187 	net_device_ctx = netdev_priv(ndev);
2188 	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
2189 
2190 	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
2191 
2192 	netdev_rx_handler_unregister(vf_netdev);
2193 	netdev_upper_dev_unlink(vf_netdev, ndev);
2194 	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
2195 	dev_put(vf_netdev);
2196 
2197 	return NOTIFY_OK;
2198 }
2199 
2200 static int netvsc_probe(struct hv_device *dev,
2201 			const struct hv_vmbus_device_id *dev_id)
2202 {
2203 	struct net_device *net = NULL;
2204 	struct net_device_context *net_device_ctx;
2205 	struct netvsc_device_info *device_info = NULL;
2206 	struct netvsc_device *nvdev;
2207 	int ret = -ENOMEM;
2208 
2209 	net = alloc_etherdev_mq(sizeof(struct net_device_context),
2210 				VRSS_CHANNEL_MAX);
2211 	if (!net)
2212 		goto no_net;
2213 
2214 	netif_carrier_off(net);
2215 
2216 	netvsc_init_settings(net);
2217 
2218 	net_device_ctx = netdev_priv(net);
2219 	net_device_ctx->device_ctx = dev;
2220 	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
2221 	if (netif_msg_probe(net_device_ctx))
2222 		netdev_dbg(net, "netvsc msg_enable: %d\n",
2223 			   net_device_ctx->msg_enable);
2224 
2225 	hv_set_drvdata(dev, net);
2226 
2227 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
2228 
2229 	spin_lock_init(&net_device_ctx->lock);
2230 	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
2231 	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
2232 
2233 	net_device_ctx->vf_stats
2234 		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
2235 	if (!net_device_ctx->vf_stats)
2236 		goto no_stats;
2237 
2238 	net->netdev_ops = &device_ops;
2239 	net->ethtool_ops = &ethtool_ops;
2240 	SET_NETDEV_DEV(net, &dev->device);
2241 
2242 	/* We always need headroom for rndis header */
2243 	net->needed_headroom = RNDIS_AND_PPI_SIZE;
2244 
2245 	/* Initialize the number of queues to 1; we may change it if more
2246 	 * channels are offered later.
2247 	 */
2248 	netif_set_real_num_tx_queues(net, 1);
2249 	netif_set_real_num_rx_queues(net, 1);
2250 
2251 	/* Notify the netvsc driver of the new device */
2252 	device_info = netvsc_devinfo_get(NULL);
2253 
2254 	if (!device_info) {
2255 		ret = -ENOMEM;
2256 		goto devinfo_failed;
2257 	}
2258 
2259 	nvdev = rndis_filter_device_add(dev, device_info);
2260 	if (IS_ERR(nvdev)) {
2261 		ret = PTR_ERR(nvdev);
2262 		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
2263 		goto rndis_failed;
2264 	}
2265 
2266 	memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
2267 
2268 	/* We must get rtnl lock before scheduling nvdev->subchan_work,
2269 	 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
2270 	 * all subchannels to show up, but that may not happen because
2271 	 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
2272 	 * -> ... -> device_add() -> ... -> __device_attach() can't get
2273 	 * the device lock, so all the subchannels can't be processed --
2274 	 * finally netvsc_subchan_work() hangs forever.
2275 	 */
2276 	rtnl_lock();
2277 
2278 	if (nvdev->num_chn > 1)
2279 		schedule_work(&nvdev->subchan_work);
2280 
2281 	/* hw_features computed in rndis_netdev_set_hwcaps() */
2282 	net->features = net->hw_features |
2283 		NETIF_F_HIGHDMA | NETIF_F_SG |
2284 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2285 	net->vlan_features = net->features;
2286 
2287 	netdev_lockdep_set_classes(net);
2288 
2289 	/* MTU range: 68 - 1500 or 65521 */
2290 	net->min_mtu = NETVSC_MTU_MIN;
2291 	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
2292 		net->max_mtu = NETVSC_MTU - ETH_HLEN;
2293 	else
2294 		net->max_mtu = ETH_DATA_LEN;
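	/* The 65521 in the range comment above is NETVSC_MTU - ETH_HLEN:
	 * 65535 minus the 14-byte Ethernet header.
	 */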
2295 
2296 	ret = register_netdevice(net);
2297 	if (ret != 0) {
2298 		pr_err("Unable to register netdev.\n");
2299 		goto register_failed;
2300 	}
2301 
2302 	list_add(&net_device_ctx->list, &netvsc_dev_list);
2303 	rtnl_unlock();
2304 
2305 	kfree(device_info);
2306 	return 0;
2307 
2308 register_failed:
2309 	rtnl_unlock();
2310 	rndis_filter_device_remove(dev, nvdev);
2311 rndis_failed:
2312 	kfree(device_info);
2313 devinfo_failed:
2314 	free_percpu(net_device_ctx->vf_stats);
2315 no_stats:
2316 	hv_set_drvdata(dev, NULL);
2317 	free_netdev(net);
2318 no_net:
2319 	return ret;
2320 }
2321 
2322 static int netvsc_remove(struct hv_device *dev)
2323 {
2324 	struct net_device_context *ndev_ctx;
2325 	struct net_device *vf_netdev, *net;
2326 	struct netvsc_device *nvdev;
2327 
2328 	net = hv_get_drvdata(dev);
2329 	if (!net) {
2330 		dev_err(&dev->device, "No net device to remove\n");
2331 		return 0;
2332 	}
2333 
2334 	ndev_ctx = netdev_priv(net);
2335 
2336 	cancel_delayed_work_sync(&ndev_ctx->dwork);
2337 
2338 	rtnl_lock();
2339 	nvdev = rtnl_dereference(ndev_ctx->nvdev);
2340 	if (nvdev)
2341 		cancel_work_sync(&nvdev->subchan_work);
2342 
2343 	/*
2344 	 * Call into the VSC driver to let it know that the device is being
2345 	 * removed; holding rtnl here also blocks MTU and channel changes.
2346 	 */
2347 	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2348 	if (vf_netdev)
2349 		netvsc_unregister_vf(vf_netdev);
2350 
2351 	if (nvdev)
2352 		rndis_filter_device_remove(dev, nvdev);
2353 
2354 	unregister_netdevice(net);
2355 	list_del(&ndev_ctx->list);
2356 
2357 	rtnl_unlock();
2358 
2359 	hv_set_drvdata(dev, NULL);
2360 
2361 	free_percpu(ndev_ctx->vf_stats);
2362 	free_netdev(net);
2363 	return 0;
2364 }
2365 
2366 static const struct hv_vmbus_device_id id_table[] = {
2367 	/* Network guid */
2368 	{ HV_NIC_GUID, },
2369 	{ },
2370 };
2371 
2372 MODULE_DEVICE_TABLE(vmbus, id_table);
2373 
2374 /* The one and only netvsc driver instance */
2375 static struct hv_driver netvsc_drv = {
2376 	.name = KBUILD_MODNAME,
2377 	.id_table = id_table,
2378 	.probe = netvsc_probe,
2379 	.remove = netvsc_remove,
2380 	.driver = {
2381 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
2382 	},
2383 };
2384 
2385 /*
2386  * On Hyper-V, every VF interface is matched with a corresponding
2387  * synthetic interface. The synthetic interface is presented first
2388  * to the guest. When the corresponding VF instance is registered,
2389  * we will take care of switching the data path.
2390  */
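/* Typical lifecycle handled below: the synthetic netdev probes first;
 * NETDEV_REGISTER for the VF triggers netvsc_register_vf() and the
 * delayed takeover work, then NETDEV_UP switches the data path to the
 * VF and NETDEV_DOWN switches it back.
 */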
2391 static int netvsc_netdev_event(struct notifier_block *this,
2392 			       unsigned long event, void *ptr)
2393 {
2394 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2395 
2396 	/* Skip our own events */
2397 	if (event_dev->netdev_ops == &device_ops)
2398 		return NOTIFY_DONE;
2399 
2400 	/* Avoid non-Ethernet type devices */
2401 	if (event_dev->type != ARPHRD_ETHER)
2402 		return NOTIFY_DONE;
2403 
2404 	/* Avoid a VLAN device with the same MAC registering as a VF */
2405 	if (is_vlan_dev(event_dev))
2406 		return NOTIFY_DONE;
2407 
2408 	/* Avoid a bonding master device with the same MAC registering as a VF */
2409 	if ((event_dev->priv_flags & IFF_BONDING) &&
2410 	    (event_dev->flags & IFF_MASTER))
2411 		return NOTIFY_DONE;
2412 
2413 	switch (event) {
2414 	case NETDEV_REGISTER:
2415 		return netvsc_register_vf(event_dev);
2416 	case NETDEV_UNREGISTER:
2417 		return netvsc_unregister_vf(event_dev);
2418 	case NETDEV_UP:
2419 	case NETDEV_DOWN:
2420 		return netvsc_vf_changed(event_dev);
2421 	default:
2422 		return NOTIFY_DONE;
2423 	}
2424 }
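
/* Notifier return values: NOTIFY_DONE means the event was not for us,
 * NOTIFY_OK means it was handled; neither stops other notifiers in the
 * chain.
 */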
2425 
2426 static struct notifier_block netvsc_netdev_notifier = {
2427 	.notifier_call = netvsc_netdev_event,
2428 };
2429 
2430 static void __exit netvsc_drv_exit(void)
2431 {
2432 	unregister_netdevice_notifier(&netvsc_netdev_notifier);
2433 	vmbus_driver_unregister(&netvsc_drv);
2434 }
2435 
2436 static int __init netvsc_drv_init(void)
2437 {
2438 	int ret;
2439 
2440 	if (ring_size < RING_SIZE_MIN) {
2441 		ring_size = RING_SIZE_MIN;
2442 		pr_info("Increased ring_size to %u (min allowed)\n",
2443 			ring_size);
2444 	}
2445 	netvsc_ring_bytes = ring_size * PAGE_SIZE;
2446 
2447 	ret = vmbus_driver_register(&netvsc_drv);
2448 	if (ret)
2449 		return ret;
2450 
2451 	register_netdevice_notifier(&netvsc_netdev_notifier);
2452 	return 0;
2453 }
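
/* ring_size is in pages; e.g. loading with a hypothetical
 * "modprobe hv_netvsc ring_size=256" yields netvsc_ring_bytes of
 * 256 * PAGE_SIZE, i.e. 1 MiB per ring with 4 KiB pages.
 */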
2454 
2455 MODULE_LICENSE("GPL");
2456 MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
2457 
2458 module_init(netvsc_drv_init);
2459 module_exit(netvsc_drv_exit);
2460