/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Network Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ntb.h>
#include <linux/ntb_transport.h>

#define NTB_NETDEV_VER	"0.7"

MODULE_DESCRIPTION(KBUILD_MODNAME);
MODULE_VERSION(NTB_NETDEV_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

/* Time in usecs for tx resource reaper */
static unsigned int tx_time = 1;

/* Number of descriptors to free before resuming tx */
static unsigned int tx_start = 10;

/* Number of descriptors still available before stop upper layer tx */
static unsigned int tx_stop = 5;

struct ntb_netdev {
	struct list_head list;
	struct pci_dev *pdev;
	struct net_device *ndev;
	struct ntb_transport_qp *qp;
	struct timer_list tx_timer;
};

#define NTB_TX_TIMEOUT_MS	1000
#define NTB_RXQ_SIZE		100

static LIST_HEAD(dev_list);

static void ntb_netdev_event_handler(void *data, int link_is_up)
{
	struct net_device *ndev = data;
	struct ntb_netdev *dev = netdev_priv(ndev);

	netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up,
		   ntb_transport_link_query(dev->qp));

	if (link_is_up) {
		if (ntb_transport_link_query(dev->qp))
			netif_carrier_on(ndev);
	} else {
		netif_carrier_off(ndev);
	}
}

static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct net_device *ndev = qp_data;
	struct sk_buff *skb;
	int rc;

	skb = data;
	if (!skb)
		return;

	netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);

	if (len < 0) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_length_errors++;
		goto enqueue_again;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	if (netif_rx(skb) == NET_RX_DROP) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	} else {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
	}

	skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
	if (!skb) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_frame_errors++;
		return;
	}

enqueue_again:
	rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
	if (rc) {
		dev_kfree_skb(skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_fifo_errors++;
	}
}
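/*
 * Tx flow control below uses a two-threshold hysteresis: the queue to
 * the upper layer is stopped once fewer than tx_stop descriptors are
 * free, and woken again either by the tx completion handler once at
 * least tx_start are free, or by the tx_timer reaper, re-armed every
 * tx_time usecs, once tx_stop are free.  The smp_mb() calls pair the
 * stop-then-recheck sequence against the free-then-wake sequence so a
 * completion racing with a stop cannot leave the queue stopped forever.
 */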
static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
				      struct ntb_transport_qp *qp, int size)
{
	struct ntb_netdev *dev = netdev_priv(netdev);

	netif_stop_queue(netdev);
	/* Make sure to see the latest value of ntb_transport_tx_free_entry()
	 * since the queue was last started.
	 */
	smp_mb();

	if (likely(ntb_transport_tx_free_entry(qp) < size)) {
		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
		return -EBUSY;
	}

	netif_start_queue(netdev);
	return 0;
}

static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
				    struct ntb_transport_qp *qp, int size)
{
	if (netif_queue_stopped(ndev) ||
	    (ntb_transport_tx_free_entry(qp) >= size))
		return 0;

	return __ntb_netdev_maybe_stop_tx(ndev, qp, size);
}

static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct net_device *ndev = qp_data;
	struct sk_buff *skb;
	struct ntb_netdev *dev = netdev_priv(ndev);

	skb = data;
	if (!skb || !ndev)
		return;

	if (len > 0) {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
	} else {
		ndev->stats.tx_errors++;
		ndev->stats.tx_aborted_errors++;
	}

	dev_kfree_skb(skb);

	if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}

static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	int rc;

	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);

	rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
	if (rc)
		goto err;

	/* check for next submit */
	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);

	return NETDEV_TX_OK;

err:
	ndev->stats.tx_dropped++;
	ndev->stats.tx_errors++;
	return NETDEV_TX_BUSY;
}

static void ntb_netdev_tx_timer(unsigned long data)
{
	struct net_device *ndev = (struct net_device *)data;
	struct ntb_netdev *dev = netdev_priv(ndev);

	if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
		/* tx_time is in usecs, so re-arm with usecs_to_jiffies */
		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
	} else {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}

static int ntb_netdev_open(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int rc, i, len;

	/* Add some empty rx bufs */
	for (i = 0; i < NTB_RXQ_SIZE; i++) {
		skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
		if (!skb) {
			rc = -ENOMEM;
			goto err;
		}

		rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
					      ndev->mtu + ETH_HLEN);
		if (rc) {
			dev_kfree_skb(skb);
			goto err;
		}
	}

	setup_timer(&dev->tx_timer, ntb_netdev_tx_timer, (unsigned long)ndev);

	netif_carrier_off(ndev);
	ntb_transport_link_up(dev->qp);
	netif_start_queue(ndev);

	return 0;

err:
	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);
	return rc;
}

static int ntb_netdev_close(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len;

	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	del_timer_sync(&dev->tx_timer);

	return 0;
}
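/*
 * Changing the MTU means re-posting the rx ring: the link is taken
 * down, all posted rx skbs are reclaimed and, when the MTU grows,
 * replaced with larger allocations before the link is brought back up.
 * The upper bound is the transport's max payload size minus the
 * Ethernet header.
 */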
static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len, rc;

	if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
		return -EINVAL;

	if (!netif_running(ndev)) {
		ndev->mtu = new_mtu;
		return 0;
	}

	/* Bring down the link and dispose of posted rx entries */
	ntb_transport_link_down(dev->qp);

	if (ndev->mtu < new_mtu) {
		int i;

		for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
			dev_kfree_skb(skb);

		for (; i; i--) {
			skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
			if (!skb) {
				rc = -ENOMEM;
				goto err;
			}

			rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
						      new_mtu + ETH_HLEN);
			if (rc) {
				dev_kfree_skb(skb);
				goto err;
			}
		}
	}

	ndev->mtu = new_mtu;

	ntb_transport_link_up(dev->qp);

	return 0;

err:
	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	netdev_err(ndev, "Error changing MTU, device inoperable\n");
	return rc;
}

static const struct net_device_ops ntb_netdev_ops = {
	.ndo_open = ntb_netdev_open,
	.ndo_stop = ntb_netdev_close,
	.ndo_start_xmit = ntb_netdev_start_xmit,
	.ndo_change_mtu = ntb_netdev_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
};

static void ntb_get_drvinfo(struct net_device *ndev,
			    struct ethtool_drvinfo *info)
{
	struct ntb_netdev *dev = netdev_priv(ndev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}

static int ntb_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane);

	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}

static const struct ethtool_ops ntb_ethtool_ops = {
	.get_drvinfo = ntb_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = ntb_get_link_ksettings,
};

static const struct ntb_queue_handlers ntb_netdev_handlers = {
	.tx_handler = ntb_netdev_tx_handler,
	.rx_handler = ntb_netdev_rx_handler,
	.event_handler = ntb_netdev_event_handler,
};
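/*
 * probe/remove are invoked by the NTB transport layer for each client
 * device.  probe allocates an etherdev with a random MAC address,
 * attaches the queue handlers above, and sizes the default MTU to the
 * transport's maximum payload less the Ethernet header.
 */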
static int ntb_netdev_probe(struct device *client_dev)
{
	struct ntb_dev *ntb;
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct ntb_netdev *dev;
	int rc;

	ntb = dev_ntb(client_dev->parent);
	pdev = ntb->pdev;
	if (!pdev)
		return -ENODEV;

	ndev = alloc_etherdev(sizeof(*dev));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, client_dev);

	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->pdev = pdev;
	ndev->features = NETIF_F_HIGHDMA;

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	ndev->hw_features = ndev->features;
	ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);

	random_ether_addr(ndev->perm_addr);
	memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);

	ndev->netdev_ops = &ntb_netdev_ops;
	ndev->ethtool_ops = &ntb_ethtool_ops;

	ndev->min_mtu = 0;
	ndev->max_mtu = ETH_MAX_MTU;

	dev->qp = ntb_transport_create_queue(ndev, client_dev,
					     &ntb_netdev_handlers);
	if (!dev->qp) {
		rc = -EIO;
		goto err;
	}

	ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;

	rc = register_netdev(ndev);
	if (rc)
		goto err1;

	list_add(&dev->list, &dev_list);
	dev_info(&pdev->dev, "%s created\n", ndev->name);
	return 0;

err1:
	ntb_transport_free_queue(dev->qp);
err:
	free_netdev(ndev);
	return rc;
}

static void ntb_netdev_remove(struct device *client_dev)
{
	struct ntb_dev *ntb;
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct ntb_netdev *dev;
	bool found = false;

	ntb = dev_ntb(client_dev->parent);
	pdev = ntb->pdev;

	list_for_each_entry(dev, &dev_list, list) {
		if (dev->pdev == pdev) {
			found = true;
			break;
		}
	}
	if (!found)
		return;

	list_del(&dev->list);

	ndev = dev->ndev;

	unregister_netdev(ndev);
	ntb_transport_free_queue(dev->qp);
	free_netdev(ndev);
}

static struct ntb_transport_client ntb_netdev_client = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.probe = ntb_netdev_probe,
	.remove = ntb_netdev_remove,
};

static int __init ntb_netdev_init_module(void)
{
	int rc;

	rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
	if (rc)
		return rc;
	return ntb_transport_register_client(&ntb_netdev_client);
}
module_init(ntb_netdev_init_module);

static void __exit ntb_netdev_exit_module(void)
{
	ntb_transport_unregister_client(&ntb_netdev_client);
	ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);
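/*
 * Example usage (a sketch; the interface name below is hypothetical,
 * the kernel assigns the real name when the device registers):
 *
 *	modprobe ntb_netdev
 *	ip addr add 192.168.100.1/24 dev eth0
 *	ip link set eth0 up
 *
 * With the peer configured symmetrically, the two hosts can exchange
 * network traffic over the NTB link.
 */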