/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN	64
#define RETRY_US_LO	5000
#define RETRY_US_HI	10000
#define RETRY_MAX	2000	/* >10 sec */

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

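/* Propagate promiscuous/allmulti flag changes on the synthetic device to
 * the bound VF slave, if any, so that the VF's receive filters track ours.
 */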
static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}

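/* Sync the unicast/multicast address lists to the VF (if present) and
 * push the updated RNDIS receive filter to the host.
 */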
static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netif_tx_wake_all_queues(net);
	}

	if (vf_netdev) {
		/* Setting the synthetic device up transparently sets
		 * the slave as up. If open fails, then the slave will
		 * still be offline (and not used).
		 */
		ret = dev_open(vf_netdev);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

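/* Poll until both the inbound and outbound ring of every channel have
 * drained. With RETRY_MAX retries of RETRY_US_LO..RETRY_US_HI sleeps this
 * gives up after roughly 10-20 seconds and returns -ETIMEDOUT.
 */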
static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive is not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netif_tx_disable(net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

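/* Append a per-packet info (PPI) record to the RNDIS message being built
 * and return a pointer to its payload (the area just past the PPI header),
 * which the caller fills in.
 */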
static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;

		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;

		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}

static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If the queue index changed, record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv,
			       select_queue_fallback_t fallback)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
						       accel_priv, fallback);
		else
			txq = fallback(vf_netdev, skb);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (unlikely(txq >= ndev->real_num_tx_queues))
		txq -= ndev->real_num_tx_queues;

	return txq;
}

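/* Describe a (page, offset, len) region as physical page entries in the
 * page buffer array, splitting on page boundaries. Returns the number of
 * slots used; the caller must have sized pb[] for the worst case.
 */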
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring the unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				offset_in_page(data),
				skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					frag->page_offset,
					skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip the unused pages at the start of the fragment */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

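/* Classify the packet for checksum offload; the result is matched against
 * tx_checksum_mask to decide between hardware offload and software
 * checksumming in netvsc_start_xmit() below.
 */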
static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}

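/* Transmit path: hand the skb to the VF when one is up, otherwise build
 * an RNDIS packet (header plus optional hash/VLAN/LSO/checksum PPIs) in
 * the skb headroom and post it to the host via netvsc_send().
 */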
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* If a VF is present and up, redirect packets to it.
	 * This function is already called with rcu_read_lock_bh held.
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will need at most two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If the skb is scattered across
	 * more pages than that, we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room; the
	 * skb->cb is used for the hv_netvsc_packet structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

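/* Build an skb for a received packet: copy the data out of the host
 * receive buffer, then apply the checksum and VLAN metadata that came
 * with the RNDIS message.
 */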
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct napi_struct *napi,
					     const struct ndis_tcp_ip_checksum_info *csum_info,
					     const struct ndis_pkt_8021q_info *vlan,
					     void *data, u32 buflen)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed
	 * to by hv_netvsc_packet cannot be deallocated.
	 */
	skb_put_data(skb, data, buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct netvsc_device *net_device,
			 struct vmbus_channel *channel,
			 void  *data, u32 len,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
				    csum_info, vlan, data, len);
	if (unlikely(!skb)) {
		++net_device_ctx->eth_stats.rx_no_memory;
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += len;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);
	return NVSP_STAT_SUCCESS;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* Don't continue trying to set up sub-channels */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	/* If device was up (receiving) then shutdown */
	if (netif_running(ndev)) {
		netif_tx_disable(ndev);

		ret = rndis_filter_close(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "unable to close device (ret %d).\n", ret);
			return ret;
		}

		ret = netvsc_wait_until_empty(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "Ring buffer not empty after closing rndis\n");
			return ret;
		}
	}

	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return 0;
}

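/* Counterpart of netvsc_detach(): re-create the RNDIS device with the
 * parameters in dev_info. The ethtool and MTU paths below detach, tweak
 * the device info, and attach again, rolling back on failure.
 */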
static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev);

		/* if unavailable, just proceed with one queue */
		if (ret) {
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	/* In any case device is now ready */
	netif_device_attach(ndev);

	/* Note: enable and attach happen when the sub-channels are set up */
	netif_carrier_off(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			return ret;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info device_info;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = count;
	device_info.send_sections = nvdev->send_section_cnt;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = nvdev->recv_section_cnt;
	device_info.recv_section_size = nvdev->recv_section_size;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		return ret;

	ret = netvsc_attach(net, &device_info);
	if (ret) {
		device_info.num_chn = orig;
		if (netvsc_attach(net, &device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

	return ret;
}

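/* Only speed, duplex, advertising and cmd may differ from our fixed
 * settings; zero those fields out and compare what remains against the
 * defaults (with port forced to PORT_OTHER).
 */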
static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->l4_hash = HV_DEFAULT_L4HASH;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info device_info;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			return ret;
	}

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn;
	device_info.send_sections = nvdev->send_section_cnt;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = nvdev->recv_section_cnt;
	device_info.recv_section_size = nvdev->recv_section_size;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto rollback_vf;

	ndev->mtu = mtu;

	ret = netvsc_attach(ndev, &device_info);
	if (ret)
		goto rollback;

	return 0;

rollback:
	/* Attempt rollback to original MTU */
	ndev->mtu = orig_mtu;

	if (netvsc_attach(ndev, &device_info))
		netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
	if (vf_netdev)
		dev_set_mtu(vf_netdev, orig_mtu);

	return ret;
}

static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	if (!nvdev)
		return;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes   += vf_tot.rx_bytes;
	t->tx_bytes   += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes	+= bytes;
		t->tx_packets	+= packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes	+= bytes;
		t->rx_packets	+= packets;
		t->multicast	+= multicast;
	}
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr);
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN
			+ NETVSC_VF_STATS_LEN
			+ NETVSC_QUEUE_STATS_LEN(nvdev);
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	struct netvsc_vf_pcpu_stats sum;
	unsigned int start;
	u64 packets, bytes;
	int i, j;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	netvsc_get_vf_stats(dev, &sum);
	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	u8 *p = data;
	int i;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		break;
	}
}

static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
			 struct ethtool_rxnfc *info)
{
	const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;

	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
		if (ndc->l4_hash & HV_TCP4_L4HASH)
			info->data |= l4_flag;

		break;

	case TCP_V6_FLOW:
		if (ndc->l4_hash & HV_TCP6_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V4_FLOW:
		if (ndc->l4_hash & HV_UDP4_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V6_FLOW:
		if (ndc->l4_hash & HV_UDP6_L4HASH)
			info->data |= l4_flag;

		break;

	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(ndc, info);
	}
	return -EOPNOTSUPP;
}

static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
				    struct ethtool_rxnfc *info)
{
	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash |= HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash |= HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash |= HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash |= HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash &= ~HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash &= ~HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash &= ~HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash &= ~HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	return -EOPNOTSUPP;
}

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
	struct net_device_context *ndc = netdev_priv(ndev);

	if (info->cmd == ETHTOOL_SRXFH)
		return netvsc_set_rss_hash_opts(ndc, info);

	return -EOPNOTSUPP;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev;
	int i;

	rcu_read_lock();
	ndev = rcu_dereference(ndc->nvdev);
	if (ndev) {
		for (i = 0; i < ndev->num_chn; i++) {
			struct netvsc_channel *nvchan = &ndev->chan_table[i];

			napi_schedule(&nvchan->napi);
		}
	}
	rcu_read_unlock();
}
#endif

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = rndis_dev->rx_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

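/* Update the RSS indirection table and/or hash key. Every indirection
 * entry must point at an existing channel; if only the table changes,
 * the current key is re-programmed alongside it.
 */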
static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= ndev->num_chn)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			rndis_dev->rx_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key);
}

/* The Hyper-V RNDIS protocol does not have a ring in the HW sense.
 * It does have a pre-allocated receive area which is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}

static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}

static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct netvsc_device_info device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn;
	device_info.send_sections = new_tx;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = new_rx;
	device_info.recv_section_size = nvdev->recv_section_size;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		return ret;

	ret = netvsc_attach(ndev, &device_info);
	if (ret) {
		device_info.send_sections = orig.tx_pending;
		device_info.recv_sections = orig.rx_pending;

		if (netvsc_attach(ndev, &device_info))
			netdev_err(ndev, "restoring ringparam failed\n");
	}

	return ret;
}

static u32 netvsc_get_msglevel(struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	return ndev_ctx->msg_enable;
}

static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	ndev_ctx->msg_enable = val;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_msglevel	= netvsc_get_msglevel,
	.set_msglevel	= netvsc_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_rxnfc	= netvsc_get_rxnfc,
	.set_rxnfc	= netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam	= netvsc_get_ringparam,
	.set_ringparam	= netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_change_rx_flags =		netvsc_change_rx_flags,
	.ndo_set_rx_mode =		netvsc_set_rx_mode,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate a link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT, when carrier is
 * already present, send a GARP packet to network peers with
 * netdev_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	/* if changes are happening, come back later */
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
		return;
	}

	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

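/* Find the netvsc device whose permanent MAC address matches; used from
 * the netdev notifier path (RTNL held) to pair a newly registered VF with
 * its synthetic twin.
 */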
static struct net_device *get_netvsc_bymac(const u8 *mac)
{
	struct net_device_context *ndev_ctx;

	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
		struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);

		if (ether_addr_equal(mac, dev->perm_addr))
			return dev;
	}

	return NULL;
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct net_device *dev;

	dev = netdev_master_upper_dev_get(vf_netdev);
	if (!dev || dev->netdev_ops != &device_ops)
		return NULL;	/* not a netvsc device */

	net_device_ctx = netdev_priv(dev);
	if (!rtnl_dereference(net_device_ctx->nvdev))
		return NULL;	/* device is removed */

	return dev;
}

/* Called when VF is injecting data into network stack.
 * Change the associated network device from VF to netvsc.
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_vf_pcpu_stats *pcpu_stats
		 = this_cpu_ptr(ndev_ctx->vf_stats);

	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	return RX_HANDLER_ANOTHER;
}

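/* Enslave the VF: register the rx handler, make the synthetic device its
 * master, and schedule the delayed takeover of the data path.
 */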
static int netvsc_vf_join(struct net_device *vf_netdev,
			  struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	int ret;

	ret = netdev_rx_handler_register(vf_netdev,
					 netvsc_vf_handle_frame, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not register netvsc VF receive handler (err = %d)\n",
			   ret);
		goto rx_handler_failed;
	}

	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
					   NULL, NULL, NULL);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not set master device %s (err = %d)\n",
			   ndev->name, ret);
		goto upper_link_failed;
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	vf_netdev->flags |= IFF_SLAVE;

	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
	return 0;

upper_link_failed:
	netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
	return ret;
}

1888 static void __netvsc_vf_setup(struct net_device *ndev,
1889 			      struct net_device *vf_netdev)
1890 {
1891 	int ret;
1892 
1893 	/* Align MTU of VF with master */
1894 	ret = dev_set_mtu(vf_netdev, ndev->mtu);
1895 	if (ret)
1896 		netdev_warn(vf_netdev,
1897 			    "unable to change mtu to %u\n", ndev->mtu);
1898 
1899 	/* Set multicast and other flags on the VF */
1900 	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
1901 
1902 	/* sync address list from ndev to VF */
1903 	netif_addr_lock_bh(ndev);
1904 	dev_uc_sync(vf_netdev, ndev);
1905 	dev_mc_sync(vf_netdev, ndev);
1906 	netif_addr_unlock_bh(ndev);
1907 
1908 	if (netif_running(ndev)) {
1909 		ret = dev_open(vf_netdev);
1910 		if (ret)
1911 			netdev_warn(vf_netdev,
1912 				    "unable to open: %d\n", ret);
1913 	}
1914 }
1915 
1916 /* Setup VF as slave of the synthetic device.
1917  * Runs in workqueue to avoid recursion in netlink callbacks.
1918  */
1919 static void netvsc_vf_setup(struct work_struct *w)
1920 {
1921 	struct net_device_context *ndev_ctx
1922 		= container_of(w, struct net_device_context, vf_takeover.work);
1923 	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
1924 	struct net_device *vf_netdev;
1925 
1926 	if (!rtnl_trylock()) {
1927 		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
1928 		return;
1929 	}
1930 
1931 	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
1932 	if (vf_netdev)
1933 		__netvsc_vf_setup(ndev, vf_netdev);
1934 
1935 	rtnl_unlock();
1936 }
1937 
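/* NETDEV_REGISTER notification: a new Ethernet device has appeared.
 * If its permanent MAC matches one of our synthetic interfaces, treat
 * it as the paired VF and join it to that interface.
 */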
1938 static int netvsc_register_vf(struct net_device *vf_netdev)
1939 {
1940 	struct net_device *ndev;
1941 	struct net_device_context *net_device_ctx;
1942 	struct netvsc_device *netvsc_dev;
1943 	int ret;
1944 
1945 	if (vf_netdev->addr_len != ETH_ALEN)
1946 		return NOTIFY_DONE;
1947 
1948 	/*
1949 	 * We will use the MAC address to locate the synthetic interface to
1950 	 * associate with the VF interface. If we don't find a matching
1951 	 * synthetic interface, move on.
1952 	 */
1953 	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
1954 	if (!ndev)
1955 		return NOTIFY_DONE;
1956 
1957 	net_device_ctx = netdev_priv(ndev);
1958 	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
1959 	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
1960 		return NOTIFY_DONE;
1961 
1962 	/* If the synthetic interface is in a different namespace,
1963 	 * then move the VF to that namespace; the join will be
1964 	 * redone in that context.
1965 	 */
1966 	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
1967 		ret = dev_change_net_namespace(vf_netdev,
1968 					       dev_net(ndev), "eth%d");
1969 		if (ret)
1970 			netdev_err(vf_netdev,
1971 				   "could not move to same namespace as %s: %d\n",
1972 				   ndev->name, ret);
1973 		else
1974 			netdev_info(vf_netdev,
1975 				    "VF moved to namespace with: %s\n",
1976 				    ndev->name);
1977 		return NOTIFY_DONE;
1978 	}
1979 
1980 	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
1981 
1982 	if (netvsc_vf_join(vf_netdev, ndev) != 0)
1983 		return NOTIFY_DONE;
1984 
1985 	dev_hold(vf_netdev);
1986 	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
1987 	return NOTIFY_OK;
1988 }
1989 
1990 /* VF up/down change detected, switch the data path accordingly */
1991 static int netvsc_vf_changed(struct net_device *vf_netdev)
1992 {
1993 	struct net_device_context *net_device_ctx;
1994 	struct netvsc_device *netvsc_dev;
1995 	struct net_device *ndev;
1996 	bool vf_is_up = netif_running(vf_netdev);
1997 
1998 	ndev = get_netvsc_byref(vf_netdev);
1999 	if (!ndev)
2000 		return NOTIFY_DONE;
2001 
2002 	net_device_ctx = netdev_priv(ndev);
2003 	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2004 	if (!netvsc_dev)
2005 		return NOTIFY_DONE;
2006 
2007 	netvsc_switch_datapath(ndev, vf_is_up);
2008 	netdev_info(ndev, "Data path switched %s VF: %s\n",
2009 		    vf_is_up ? "to" : "from", vf_netdev->name);
2010 
2011 	return NOTIFY_OK;
2012 }
2013 
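/* NETDEV_UNREGISTER notification for a paired VF: tear down in the
 * reverse order of netvsc_vf_join() and drop the reference taken in
 * netvsc_register_vf().
 */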
2014 static int netvsc_unregister_vf(struct net_device *vf_netdev)
2015 {
2016 	struct net_device *ndev;
2017 	struct net_device_context *net_device_ctx;
2018 
2019 	ndev = get_netvsc_byref(vf_netdev);
2020 	if (!ndev)
2021 		return NOTIFY_DONE;
2022 
2023 	net_device_ctx = netdev_priv(ndev);
2024 	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
2025 
2026 	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
2027 
2028 	netdev_rx_handler_unregister(vf_netdev);
2029 	netdev_upper_dev_unlink(vf_netdev, ndev);
2030 	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
2031 	dev_put(vf_netdev);
2032 
2033 	return NOTIFY_OK;
2034 }
2035 
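/* Probe a new VMBus NIC instance: allocate the netdev and the per-cpu
 * VF stats, bring up the RNDIS filter to negotiate with the host, then
 * register the netdev under RTNL and add it to netvsc_dev_list.
 */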
2036 static int netvsc_probe(struct hv_device *dev,
2037 			const struct hv_vmbus_device_id *dev_id)
2038 {
2039 	struct net_device *net = NULL;
2040 	struct net_device_context *net_device_ctx;
2041 	struct netvsc_device_info device_info;
2042 	struct netvsc_device *nvdev;
2043 	int ret = -ENOMEM;
2044 
2045 	net = alloc_etherdev_mq(sizeof(struct net_device_context),
2046 				VRSS_CHANNEL_MAX);
2047 	if (!net)
2048 		goto no_net;
2049 
2050 	netif_carrier_off(net);
2051 
2052 	netvsc_init_settings(net);
2053 
2054 	net_device_ctx = netdev_priv(net);
2055 	net_device_ctx->device_ctx = dev;
2056 	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
2057 	if (netif_msg_probe(net_device_ctx))
2058 		netdev_dbg(net, "netvsc msg_enable: %d\n",
2059 			   net_device_ctx->msg_enable);
2060 
2061 	hv_set_drvdata(dev, net);
2062 
2063 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
2064 
2065 	spin_lock_init(&net_device_ctx->lock);
2066 	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
2067 	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
2068 
2069 	net_device_ctx->vf_stats
2070 		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
2071 	if (!net_device_ctx->vf_stats)
2072 		goto no_stats;
2073 
2074 	net->netdev_ops = &device_ops;
2075 	net->ethtool_ops = &ethtool_ops;
2076 	SET_NETDEV_DEV(net, &dev->device);
2077 
2078 	/* We always need headroom for the RNDIS header */
2079 	net->needed_headroom = RNDIS_AND_PPI_SIZE;
2080 
2081 	/* Initialize the number of queues to 1; it may be changed if more
2082 	 * channels are offered later.
2083 	 */
2084 	netif_set_real_num_tx_queues(net, 1);
2085 	netif_set_real_num_rx_queues(net, 1);
2086 
2087 	/* Notify the netvsc driver of the new device */
2088 	memset(&device_info, 0, sizeof(device_info));
2089 	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
2090 	device_info.send_sections = NETVSC_DEFAULT_TX;
2091 	device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
2092 	device_info.recv_sections = NETVSC_DEFAULT_RX;
2093 	device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
2094 
2095 	nvdev = rndis_filter_device_add(dev, &device_info);
2096 	if (IS_ERR(nvdev)) {
2097 		ret = PTR_ERR(nvdev);
2098 		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
2099 		goto rndis_failed;
2100 	}
2101 
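	/* rndis_filter_device_add() queried the host and filled in the
	 * device's permanent MAC address in device_info.
	 */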
2102 	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
2103 
2104 	if (nvdev->num_chn > 1)
2105 		schedule_work(&nvdev->subchan_work);
2106 
2107 	/* hw_features computed in rndis_netdev_set_hwcaps() */
2108 	net->features = net->hw_features |
2109 		NETIF_F_HIGHDMA | NETIF_F_SG |
2110 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2111 	net->vlan_features = net->features;
2112 
2113 	netdev_lockdep_set_classes(net);
2114 
2115 	/* MTU range: 68 - 1500 or 65521 */
2116 	net->min_mtu = NETVSC_MTU_MIN;
2117 	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
2118 		net->max_mtu = NETVSC_MTU - ETH_HLEN;
2119 	else
2120 		net->max_mtu = ETH_DATA_LEN;
2121 
2122 	rtnl_lock();
2123 	ret = register_netdevice(net);
2124 	if (ret != 0) {
2125 		pr_err("Unable to register netdev.\n");
2126 		goto register_failed;
2127 	}
2128 
2129 	list_add(&net_device_ctx->list, &netvsc_dev_list);
2130 	rtnl_unlock();
2131 	return 0;
2132 
2133 register_failed:
2134 	rtnl_unlock();
2135 	rndis_filter_device_remove(dev, nvdev);
2136 rndis_failed:
2137 	free_percpu(net_device_ctx->vf_stats);
2138 no_stats:
2139 	hv_set_drvdata(dev, NULL);
2140 	free_netdev(net);
2141 no_net:
2142 	return ret;
2143 }
2144 
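/* Device teardown: flush the delayed work, detach any paired VF,
 * remove the RNDIS filter device and unregister the netdev. RTNL is
 * held across the teardown so MTU and channel changes cannot race
 * with removal.
 */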
2145 static int netvsc_remove(struct hv_device *dev)
2146 {
2147 	struct net_device_context *ndev_ctx;
2148 	struct net_device *vf_netdev, *net;
2149 	struct netvsc_device *nvdev;
2150 
2151 	net = hv_get_drvdata(dev);
2152 	if (!net) {
2153 		dev_err(&dev->device, "No net device to remove\n");
2154 		return 0;
2155 	}
2156 
2157 	ndev_ctx = netdev_priv(net);
2158 
2159 	cancel_delayed_work_sync(&ndev_ctx->dwork);
2160 
2161 	rcu_read_lock();
2162 	nvdev = rcu_dereference(ndev_ctx->nvdev);
2163 
2164 	if (nvdev)
2165 		cancel_work_sync(&nvdev->subchan_work);
2166 
2167 	/*
2168 	 * Call into the VSC driver to let it know the device is being
2169 	 * removed. Holding RTNL also blocks MTU and channel changes.
2170 	 */
2171 	rtnl_lock();
2172 	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2173 	if (vf_netdev)
2174 		netvsc_unregister_vf(vf_netdev);
2175 
2176 	if (nvdev)
2177 		rndis_filter_device_remove(dev, nvdev);
2178 
2179 	unregister_netdevice(net);
2180 	list_del(&ndev_ctx->list);
2181 
2182 	rtnl_unlock();
2183 	rcu_read_unlock();
2184 
2185 	hv_set_drvdata(dev, NULL);
2186 
2187 	free_percpu(ndev_ctx->vf_stats);
2188 	free_netdev(net);
2189 	return 0;
2190 }
2191 
2192 static const struct hv_vmbus_device_id id_table[] = {
2193 	/* Network guid */
2194 	{ HV_NIC_GUID, },
2195 	{ },
2196 };
2197 
2198 MODULE_DEVICE_TABLE(vmbus, id_table);
2199 
2200 /* The one and only netvsc driver instance */
2201 static struct hv_driver netvsc_drv = {
2202 	.name = KBUILD_MODNAME,
2203 	.id_table = id_table,
2204 	.probe = netvsc_probe,
2205 	.remove = netvsc_remove,
2206 };
2207 
2208 /*
2209  * On Hyper-V, every VF interface is matched with a corresponding
2210  * synthetic interface. The synthetic interface is presented first
2211  * to the guest. When the corresponding VF instance is registered,
2212  * we will take care of switching the data path.
2213  */
2214 static int netvsc_netdev_event(struct notifier_block *this,
2215 			       unsigned long event, void *ptr)
2216 {
2217 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2218 
2219 	/* Skip our own events */
2220 	if (event_dev->netdev_ops == &device_ops)
2221 		return NOTIFY_DONE;
2222 
2223 	/* Avoid non-Ethernet type devices */
2224 	if (event_dev->type != ARPHRD_ETHER)
2225 		return NOTIFY_DONE;
2226 
2227 	/* Avoid a VLAN device with the same MAC registering as a VF */
2228 	if (is_vlan_dev(event_dev))
2229 		return NOTIFY_DONE;
2230 
2231 	/* Avoid a bonding master device with the same MAC registering as a VF */
2232 	if ((event_dev->priv_flags & IFF_BONDING) &&
2233 	    (event_dev->flags & IFF_MASTER))
2234 		return NOTIFY_DONE;
2235 
2236 	switch (event) {
2237 	case NETDEV_REGISTER:
2238 		return netvsc_register_vf(event_dev);
2239 	case NETDEV_UNREGISTER:
2240 		return netvsc_unregister_vf(event_dev);
2241 	case NETDEV_UP:
2242 	case NETDEV_DOWN:
2243 		return netvsc_vf_changed(event_dev);
2244 	default:
2245 		return NOTIFY_DONE;
2246 	}
2247 }
2248 
2249 static struct notifier_block netvsc_netdev_notifier = {
2250 	.notifier_call = netvsc_netdev_event,
2251 };
2252 
2253 static void __exit netvsc_drv_exit(void)
2254 {
2255 	unregister_netdevice_notifier(&netvsc_netdev_notifier);
2256 	vmbus_driver_unregister(&netvsc_drv);
2257 }
2258 
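/* Module init: clamp ring_size to the allowed minimum and convert it
 * from pages to bytes. For example, on a system with 4 KiB pages the
 * default ring_size of 128 yields netvsc_ring_bytes = 512 KiB.
 */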
2259 static int __init netvsc_drv_init(void)
2260 {
2261 	int ret;
2262 
2263 	if (ring_size < RING_SIZE_MIN) {
2264 		ring_size = RING_SIZE_MIN;
2265 		pr_info("Increased ring_size to %u (min allowed)\n",
2266 			ring_size);
2267 	}
2268 	netvsc_ring_bytes = ring_size * PAGE_SIZE;
2269 
2270 	ret = vmbus_driver_register(&netvsc_drv);
2271 	if (ret)
2272 		return ret;
2273 
2274 	register_netdevice_notifier(&netvsc_netdev_notifier);
2275 	return 0;
2276 }
2277 
2278 MODULE_LICENSE("GPL");
2279 MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
2280 
2281 module_init(netvsc_drv_init);
2282 module_exit(netvsc_drv_exit);
2283