xref: /openbmc/linux/drivers/net/hyperv/netvsc_drv.c (revision 089a49b6)
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

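/*
 * Per-netdev private state, carried in the netdev_priv() area of the
 * net_device allocated by alloc_etherdev() in netvsc_probe().
 */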
struct net_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	struct delayed_work dwork;
	struct work_struct work;
};

#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

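/*
 * Workqueue handler that programs the host-side RNDIS packet filter to
 * match the current interface flags: accept everything in promiscuous
 * mode, otherwise directed, broadcast and all-multicast frames.
 */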
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;

	nvdev = hv_get_drvdata(ndevctx->device_ctx);
	if (nvdev == NULL || nvdev->ndev == NULL)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (nvdev->ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

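/*
 * ndo_set_rx_mode callback. It is invoked in atomic context, while the
 * RNDIS filter update needs a blocking control request, so the update is
 * deferred to do_set_multicast() on the system workqueue.
 */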
static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret = 0;

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_start_queue(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0)
		netdev_err(net, "unable to close device (ret %d).\n", ret);

	return ret;
}

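/*
 * Send-completion callback: the host has consumed the packet, so release
 * the hv_netvsc_packet descriptor and the skb that was stashed in the
 * completion tid.
 */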
static void netvsc_xmit_completion(void *context)
{
	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
	struct sk_buff *skb = (struct sk_buff *)
		(unsigned long)packet->completion.send.send_completion_tid;

	kfree(packet);

	if (skb)
		dev_kfree_skb_any(skb);
}

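/*
 * Transmit path. The packet descriptor carries a page buffer array laid
 * out as: slot 0 left for the RNDIS message header, slots 1..npg_data
 * covering the linear skb data, then one slot per page fragment.
 * page_buf_cnt starts at num_pages - 1 and is bumped later if the RNDIS
 * message spills onto a second page.
 */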
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet;
	int ret;
	unsigned int i, num_pages, npg_data;

	/* Add multipages for skb->data and additional 2 for RNDIS */
	npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
		>> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
	num_pages = skb_shinfo(skb)->nr_frags + npg_data + 2;

	/* Allocate a netvsc packet based on # of frags. */
	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
			 (num_pages * sizeof(struct hv_page_buffer)) +
			 sizeof(struct rndis_filter_packet) +
			 NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
	if (!packet) {
		/* out of memory, drop packet */
		netdev_err(net, "unable to allocate hv_netvsc_packet\n");

		dev_kfree_skb(skb);
		net->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	packet->vlan_tci = skb->vlan_tci;

	packet->extension = (void *)(unsigned long)packet +
				sizeof(struct hv_netvsc_packet) +
				    (num_pages * sizeof(struct hv_page_buffer));

	/* If the rndis msg goes beyond 1 page, we will add 1 later */
	packet->page_buf_cnt = num_pages - 1;

	/* Initialize it from the skb */
	packet->total_data_buflen = skb->len;

	/* Start filling in the page buffers starting after RNDIS buffer. */
	packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
	packet->page_buf[1].offset
		= (unsigned long)skb->data & (PAGE_SIZE - 1);
	if (npg_data == 1)
		packet->page_buf[1].len = skb_headlen(skb);
	else
		packet->page_buf[1].len = PAGE_SIZE
			- packet->page_buf[1].offset;

	for (i = 2; i <= npg_data; i++) {
		packet->page_buf[i].pfn = virt_to_phys(skb->data
			+ PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
		packet->page_buf[i].offset = 0;
		packet->page_buf[i].len = PAGE_SIZE;
	}
	if (npg_data > 1)
		packet->page_buf[npg_data].len = (((unsigned long)skb->data
			+ skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;

	/* Additional fragments are after SKB data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		packet->page_buf[i+npg_data+1].pfn =
			page_to_pfn(skb_frag_page(f));
		packet->page_buf[i+npg_data+1].offset = f->page_offset;
		packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
	}

	/* Set the completion routine */
	packet->completion.send.send_completion = netvsc_xmit_completion;
	packet->completion.send.send_completion_ctx = packet;
	packet->completion.send.send_completion_tid = (unsigned long)skb;

	ret = rndis_filter_send(net_device_ctx->device_ctx,
				  packet);
	if (ret == 0) {
		net->stats.tx_bytes += skb->len;
		net->stats.tx_packets++;
	} else {
		kfree(packet);
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				       unsigned int status)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(device_obj);
	net = net_device->ndev;

	if (!net) {
		netdev_err(net, "got link status but net device "
				"not initialized yet\n");
		return;
	}

	if (status == 1) {
		netif_carrier_on(net);
		ndev_ctx = netdev_priv(net);
		schedule_delayed_work(&ndev_ctx->dwork, 0);
		schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
	} else {
		netif_carrier_off(net);
	}
}

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
				struct hv_netvsc_packet *packet)
{
	struct net_device *net;
	struct sk_buff *skb;

	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
	if (!net) {
		netdev_err(net, "got receive callback but net device"
			" not initialized yet\n");
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
		packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	skb->ip_summed = CHECKSUM_NONE;
	if (packet->vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       packet->vlan_tci);

	net->stats.rx_packets++;
	net->stats.rx_bytes += packet->total_data_buflen;

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

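/*
 * Changing the MTU requires tearing the RNDIS device down and bringing it
 * back up with the new size, so pending work is cancelled and the queue
 * stopped around the rndis_filter_device_remove()/_add() pair.
 */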
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev =  ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU;

	if (mtu < 68 || mtu > limit)
		return -EINVAL;

	nvdev->start_remove = true;
	cancel_delayed_work_sync(&ndevctx->dwork);
	cancel_work_sync(&ndevctx->work);
	netif_tx_disable(ndev);
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);
	device_info.ring_size = ring_size;
	rndis_filter_device_add(hdev, &device_info);
	netif_wake_queue(ndev);

	return 0;
}

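/*
 * Set a new MAC address: update the netdev first, then push the address
 * to the host over RNDIS, rolling the netdev back if the host rejects it.
 */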
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev =  ndevctx->device_ctx;
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(hdev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
};

/*
 * Send GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
 * another netdev_notify_peers() into a delayed work, otherwise the GARP
 * packet will not be sent after quick migration, causing network
 * disconnection.
 */
static void netvsc_send_garp(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;

	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	net = net_device->ndev;
	netdev_notify_peers(net);
}

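/*
 * Probe: allocate and register the net_device, then ask the RNDIS filter
 * to bring up the underlying netvsc channel and report the MAC address,
 * which is copied into the netdev.
 */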
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	int ret;

	net = alloc_etherdev(sizeof(struct net_device_context));
	if (!net)
		return -ENOMEM;

	/* Set initial state */
	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	net->netdev_ops = &device_ops;

	/* TODO: Add GSO and Checksum offload */
	net->hw_features = 0;
	net->features = NETIF_F_HW_VLAN_CTAG_TX;

	SET_ETHTOOL_OPS(net, &ethtool_ops);
	SET_NETDEV_DEV(net, &dev->device);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		free_netdev(net);
		goto out;
	}

	/* Notify the netvsc driver of the new device */
	device_info.ring_size = ring_size;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		unregister_netdev(net);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	netif_carrier_on(net);

out:
	return ret;
}

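/*
 * Remove: mark the device as going away, cancel deferred work, stop the
 * transmit queue and unregister the netdev before tearing down the RNDIS
 * device and freeing the net_device.
 */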
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct  hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
	vmbus_driver_unregister(&netvsc_drv);
}

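/* Module init: clamp ring_size to the supported minimum, then register on VMBus. */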
static int __init netvsc_drv_init(void)
{
	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	return vmbus_driver_register(&netvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);