/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Network Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ntb.h>
#include <linux/ntb_transport.h>

#define NTB_NETDEV_VER	"0.7"

MODULE_DESCRIPTION(KBUILD_MODNAME);
MODULE_VERSION(NTB_NETDEV_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

/* Time in usecs for tx resource reaper */
static unsigned int tx_time = 1;

/* Number of descriptors to free before resuming tx */
static unsigned int tx_start = 10;

/* Number of descriptors still available before stopping upper layer tx */
static unsigned int tx_stop = 5;
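
/*
 * These knobs work together: the stack's tx queue is stopped once fewer
 * than tx_stop transport entries remain free, the tx_timer then polls
 * every tx_time usecs, and the queue is woken again when tx completions
 * have freed at least tx_start entries (or the timer sees tx_stop or
 * more free).
 */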

struct ntb_netdev {
	struct pci_dev *pdev;
	struct net_device *ndev;
	struct ntb_transport_qp *qp;
	struct timer_list tx_timer;
};

#define	NTB_TX_TIMEOUT_MS	1000
#define	NTB_RXQ_SIZE		100

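/*
 * Transport queue event callback: mirror the NTB link state into the
 * netdev carrier so the stack only transmits while the peer is up.
 */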
static void ntb_netdev_event_handler(void *data, int link_is_up)
{
	struct net_device *ndev = data;
	struct ntb_netdev *dev = netdev_priv(ndev);

	netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up,
		   ntb_transport_link_query(dev->qp));

	if (link_is_up) {
		if (ntb_transport_link_query(dev->qp))
			netif_carrier_on(ndev);
	} else {
		netif_carrier_off(ndev);
	}
}

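/*
 * Rx completion callback: hand the received skb to the stack and post a
 * freshly allocated replacement buffer back to the transport queue.  A
 * negative length indicates a receive error, in which case the original
 * skb is simply re-posted.
 */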
static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct net_device *ndev = qp_data;
	struct sk_buff *skb;
	int rc;

	skb = data;
	if (!skb)
		return;

	netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);

	if (len < 0) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_length_errors++;
		goto enqueue_again;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	if (netif_rx(skb) == NET_RX_DROP) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	} else {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
	}

	skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
	if (!skb) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_frame_errors++;
		return;
	}

enqueue_again:
	rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
	if (rc) {
		dev_kfree_skb_any(skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_fifo_errors++;
	}
}

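/*
 * Stop the tx queue when the transport is running low on free entries.
 * The queue is stopped first and the free count is then re-checked (with
 * a barrier against the completion path); if entries were freed in the
 * meantime the queue is restarted, otherwise the reaper timer is armed.
 */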
static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
				      struct ntb_transport_qp *qp, int size)
{
	struct ntb_netdev *dev = netdev_priv(netdev);

	netif_stop_queue(netdev);
	/* Make sure to see the latest value of ntb_transport_tx_free_entry()
	 * since the queue was last started.
	 */
	smp_mb();

	if (likely(ntb_transport_tx_free_entry(qp) < size)) {
		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
		return -EBUSY;
	}

	netif_start_queue(netdev);
	return 0;
}

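/*
 * Called around each transmit: skip the slow path while the queue is
 * already stopped or plenty of entries remain free.
 */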
static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
				    struct ntb_transport_qp *qp, int size)
{
	if (netif_queue_stopped(ndev) ||
	    (ntb_transport_tx_free_entry(qp) >= size))
		return 0;

	return __ntb_netdev_maybe_stop_tx(ndev, qp, size);
}

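/*
 * Tx completion callback: update stats, free the skb and, once enough
 * transport entries are free again, wake a stopped tx queue.
 */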
static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct net_device *ndev = qp_data;
	struct sk_buff *skb;
	struct ntb_netdev *dev = netdev_priv(ndev);

	skb = data;
	if (!skb || !ndev)
		return;

	if (len > 0) {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
	} else {
		ndev->stats.tx_errors++;
		ndev->stats.tx_aborted_errors++;
	}

	dev_kfree_skb_any(skb);

	if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}

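/*
 * ndo_start_xmit: hand the skb straight to the NTB transport.  The queue
 * is (possibly) stopped before and after the enqueue so the stack backs
 * off before the transport ring runs completely dry.
 */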
static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	int rc;

	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);

	rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
	if (rc)
		goto err;

	/* check for next submit */
	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);

	return NETDEV_TX_OK;

err:
	ndev->stats.tx_dropped++;
	ndev->stats.tx_errors++;
	return NETDEV_TX_BUSY;
}

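/*
 * Tx reaper timer: periodically re-check the free entry count while the
 * queue is stopped and wake it once enough entries are available.
 */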
static void ntb_netdev_tx_timer(struct timer_list *t)
{
	struct ntb_netdev *dev = from_timer(dev, t, tx_timer);
	struct net_device *ndev = dev->ndev;

	if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
	} else {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}

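/*
 * ndo_open: pre-post a fixed number of MTU-sized rx buffers, set up the
 * tx reaper timer and bring the transport link up.  Carrier is only
 * asserted later, from the link event handler.
 */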
static int ntb_netdev_open(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int rc, i, len;

	/* Add some empty rx bufs */
	for (i = 0; i < NTB_RXQ_SIZE; i++) {
		skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
		if (!skb) {
			rc = -ENOMEM;
			goto err;
		}

		rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
					      ndev->mtu + ETH_HLEN);
		if (rc) {
			dev_kfree_skb(skb);
			goto err;
		}
	}

	timer_setup(&dev->tx_timer, ntb_netdev_tx_timer, 0);

	netif_carrier_off(ndev);
	ntb_transport_link_up(dev->qp);
	netif_start_queue(ndev);

	return 0;

err:
	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);
	return rc;
}

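/*
 * ndo_stop: bring the transport link down, reclaim and free all posted
 * rx buffers and make sure the tx reaper timer is no longer running.
 */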
static int ntb_netdev_close(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len;

	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	del_timer_sync(&dev->tx_timer);

	return 0;
}

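/*
 * ndo_change_mtu: the new MTU must fit in a single transport entry.  If
 * the device is running and the MTU grows, the link is taken down and the
 * posted rx buffers are replaced with larger ones before it is brought
 * back up; if reallocation fails the device is left without rx buffers.
 */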
static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len, rc;

	if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
		return -EINVAL;

	if (!netif_running(ndev)) {
		ndev->mtu = new_mtu;
		return 0;
	}

	/* Bring down the link and dispose of posted rx entries */
	ntb_transport_link_down(dev->qp);

	if (ndev->mtu < new_mtu) {
		int i;

		for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
			dev_kfree_skb(skb);

		for (; i; i--) {
			skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
			if (!skb) {
				rc = -ENOMEM;
				goto err;
			}

			rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
						      new_mtu + ETH_HLEN);
			if (rc) {
				dev_kfree_skb(skb);
				goto err;
			}
		}
	}

	ndev->mtu = new_mtu;

	ntb_transport_link_up(dev->qp);

	return 0;

err:
	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	netdev_err(ndev, "Error changing MTU, device inoperable\n");
	return rc;
}

static const struct net_device_ops ntb_netdev_ops = {
	.ndo_open = ntb_netdev_open,
	.ndo_stop = ntb_netdev_close,
	.ndo_start_xmit = ntb_netdev_start_xmit,
	.ndo_change_mtu = ntb_netdev_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
};

static void ntb_get_drvinfo(struct net_device *ndev,
			    struct ethtool_drvinfo *info)
{
	struct ntb_netdev *dev = netdev_priv(ndev);

	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strscpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
	strscpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}

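/*
 * Report a generic backplane-style link to ethtool; the NTB link has no
 * meaningful speed or PHY, so the speed is left unknown.
 */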
static int ntb_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane);

	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}

static const struct ethtool_ops ntb_ethtool_ops = {
	.get_drvinfo = ntb_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = ntb_get_link_ksettings,
};

static const struct ntb_queue_handlers ntb_netdev_handlers = {
	.tx_handler = ntb_netdev_tx_handler,
	.rx_handler = ntb_netdev_rx_handler,
	.event_handler = ntb_netdev_event_handler,
};

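/*
 * Transport client probe: allocate an ethernet device on top of an NTB
 * transport queue pair, give it a random MAC address and size its MTU to
 * the largest payload the transport can carry in one entry.
 */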
static int ntb_netdev_probe(struct device *client_dev)
{
	struct ntb_dev *ntb;
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct ntb_netdev *dev;
	int rc;

	ntb = dev_ntb(client_dev->parent);
	pdev = ntb->pdev;
	if (!pdev)
		return -ENODEV;

	ndev = alloc_etherdev(sizeof(*dev));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, client_dev);

	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->pdev = pdev;
	ndev->features = NETIF_F_HIGHDMA;

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	ndev->hw_features = ndev->features;
	ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);

	eth_random_addr(ndev->perm_addr);
	dev_addr_set(ndev, ndev->perm_addr);

	ndev->netdev_ops = &ntb_netdev_ops;
	ndev->ethtool_ops = &ntb_ethtool_ops;

	ndev->min_mtu = 0;
	ndev->max_mtu = ETH_MAX_MTU;

	dev->qp = ntb_transport_create_queue(ndev, client_dev,
					     &ntb_netdev_handlers);
	if (!dev->qp) {
		rc = -EIO;
		goto err;
	}

	ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;

	rc = register_netdev(ndev);
	if (rc)
		goto err1;

	dev_set_drvdata(client_dev, ndev);
	dev_info(&pdev->dev, "%s created\n", ndev->name);
	return 0;

err1:
	ntb_transport_free_queue(dev->qp);
err:
	free_netdev(ndev);
	return rc;
}

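/*
 * Transport client remove: tear everything down in the reverse order of
 * probe.
 */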
static void ntb_netdev_remove(struct device *client_dev)
{
	struct net_device *ndev = dev_get_drvdata(client_dev);
	struct ntb_netdev *dev = netdev_priv(ndev);

	unregister_netdev(ndev);
	ntb_transport_free_queue(dev->qp);
	free_netdev(ndev);
}

static struct ntb_transport_client ntb_netdev_client = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.probe = ntb_netdev_probe,
	.remove = ntb_netdev_remove,
};

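/*
 * Register both the client device and the client itself with the NTB
 * transport; late_initcall() keeps this registration after the NTB
 * transport core has initialized when everything is built in.
 */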
static int __init ntb_netdev_init_module(void)
{
	int rc;

	rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
	if (rc)
		return rc;

	rc = ntb_transport_register_client(&ntb_netdev_client);
	if (rc) {
		ntb_transport_unregister_client_dev(KBUILD_MODNAME);
		return rc;
	}

	return 0;
}
late_initcall(ntb_netdev_init_module);

static void __exit ntb_netdev_exit_module(void)
{
	ntb_transport_unregister_client(&ntb_netdev_client);
	ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);