1 /* 2 * Keystone NetCP Core driver 3 * 4 * Copyright (C) 2014 Texas Instruments Incorporated 5 * Authors: Sandeep Nair <sandeep_n@ti.com> 6 * Sandeep Paulraj <s-paulraj@ti.com> 7 * Cyril Chemparathy <cyril@ti.com> 8 * Santosh Shilimkar <santosh.shilimkar@ti.com> 9 * Murali Karicheri <m-karicheri2@ti.com> 10 * Wingman Kwok <w-kwok2@ti.com> 11 * 12 * This program is free software; you can redistribute it and/or 13 * modify it under the terms of the GNU General Public License as 14 * published by the Free Software Foundation version 2. 15 * 16 * This program is distributed "as is" WITHOUT ANY WARRANTY of any 17 * kind, whether express or implied; without even the implied warranty 18 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * GNU General Public License for more details. 20 */ 21 22 #include <linux/io.h> 23 #include <linux/module.h> 24 #include <linux/of_net.h> 25 #include <linux/of_address.h> 26 #include <linux/if_vlan.h> 27 #include <linux/pm_runtime.h> 28 #include <linux/platform_device.h> 29 #include <linux/soc/ti/knav_qmss.h> 30 #include <linux/soc/ti/knav_dma.h> 31 32 #include "netcp.h" 33 34 #define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD) 35 #define NETCP_NAPI_WEIGHT 64 36 #define NETCP_TX_TIMEOUT (5 * HZ) 37 #define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN) 38 #define NETCP_MIN_PACKET_SIZE ETH_ZLEN 39 #define NETCP_MAX_MCAST_ADDR 16 40 41 #define NETCP_EFUSE_REG_INDEX 0 42 43 #define NETCP_MOD_PROBE_SKIPPED 1 44 #define NETCP_MOD_PROBE_FAILED 2 45 46 #define NETCP_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \ 47 NETIF_MSG_DRV | NETIF_MSG_LINK | \ 48 NETIF_MSG_IFUP | NETIF_MSG_INTR | \ 49 NETIF_MSG_PROBE | NETIF_MSG_TIMER | \ 50 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \ 51 NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \ 52 NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \ 53 NETIF_MSG_RX_STATUS) 54 55 #define NETCP_EFUSE_ADDR_SWAP 2 56 57 #define knav_queue_get_id(q) knav_queue_device_control(q, \ 58 KNAV_QUEUE_GET_ID, (unsigned long)NULL) 59 60 #define knav_queue_enable_notify(q) knav_queue_device_control(q, \ 61 KNAV_QUEUE_ENABLE_NOTIFY, \ 62 (unsigned long)NULL) 63 64 #define knav_queue_disable_notify(q) knav_queue_device_control(q, \ 65 KNAV_QUEUE_DISABLE_NOTIFY, \ 66 (unsigned long)NULL) 67 68 #define knav_queue_get_count(q) knav_queue_device_control(q, \ 69 KNAV_QUEUE_GET_COUNT, (unsigned long)NULL) 70 71 #define for_each_netcp_module(module) \ 72 list_for_each_entry(module, &netcp_modules, module_list) 73 74 #define for_each_netcp_device_module(netcp_device, inst_modpriv) \ 75 list_for_each_entry(inst_modpriv, \ 76 &((netcp_device)->modpriv_head), inst_list) 77 78 #define for_each_module(netcp, intf_modpriv) \ 79 list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list) 80 81 /* Module management structures */ 82 struct netcp_device { 83 struct list_head device_list; 84 struct list_head interface_head; 85 struct list_head modpriv_head; 86 struct device *device; 87 }; 88 89 struct netcp_inst_modpriv { 90 struct netcp_device *netcp_device; 91 struct netcp_module *netcp_module; 92 struct list_head inst_list; 93 void *module_priv; 94 }; 95 96 struct netcp_intf_modpriv { 97 struct netcp_intf *netcp_priv; 98 struct netcp_module *netcp_module; 99 struct list_head intf_list; 100 void *module_priv; 101 }; 102 103 static LIST_HEAD(netcp_devices); 104 static LIST_HEAD(netcp_modules); 105 static DEFINE_MUTEX(netcp_modules_lock); 106 107 static int netcp_debug_level = -1; 108 module_param(netcp_debug_level, int, 0); 109 MODULE_PARM_DESC(netcp_debug_level, "Netcp 
debug level (NETIF_MSG bits) (0=none,...,16=all)");

/* Helper functions - Get/Set */
static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
			 struct knav_dma_desc *desc)
{
	*buff_len = le32_to_cpu(desc->buff_len);
	*buff = le32_to_cpu(desc->buff);
	*ndesc = le32_to_cpu(desc->next_desc);
}

/* pad[0]/pad[1] hold a virtual pointer split into two 32-bit halves and
 * pad[2] holds a buffer length; see get_pad_ptr() and the Rx/Tx paths.
 */
static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc)
{
	*pad0 = le32_to_cpu(desc->pad[0]);
	*pad1 = le32_to_cpu(desc->pad[1]);
	*pad2 = le32_to_cpu(desc->pad[2]);
}

static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc)
{
	u64 pad64;

	pad64 = le32_to_cpu(desc->pad[0]) +
		((u64)le32_to_cpu(desc->pad[1]) << 32);
	*padptr = (void *)(uintptr_t)pad64;
}

static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
			     struct knav_dma_desc *desc)
{
	*buff = le32_to_cpu(desc->orig_buff);
	*buff_len = le32_to_cpu(desc->orig_len);
}

static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		words[i] = le32_to_cpu(desc[i]);
}

static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
			 struct knav_dma_desc *desc)
{
	desc->buff_len = cpu_to_le32(buff_len);
	desc->buff = cpu_to_le32(buff);
	desc->next_desc = cpu_to_le32(ndesc);
}

static void set_desc_info(u32 desc_info, u32 pkt_info,
			  struct knav_dma_desc *desc)
{
	desc->desc_info = cpu_to_le32(desc_info);
	desc->packet_info = cpu_to_le32(pkt_info);
}

static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc)
{
	desc->pad[0] = cpu_to_le32(pad0);
	desc->pad[1] = cpu_to_le32(pad1);
	desc->pad[2] = cpu_to_le32(pad2);
}

static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
			     struct knav_dma_desc *desc)
{
	desc->orig_buff = cpu_to_le32(buff);
	desc->orig_len = cpu_to_le32(buff_len);
}

static void set_words(u32 *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		desc[i] = cpu_to_le32(words[i]);
}

/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
	unsigned int addr0, addr1;

	addr1 = readl(efuse_mac + 4);
	addr0 = readl(efuse_mac);

	switch (swap) {
	case NETCP_EFUSE_ADDR_SWAP:
		addr0 = addr1;
		addr1 = readl(efuse_mac);
		break;
	default:
		break;
	}

	x[0] = (addr1 & 0x0000ff00) >> 8;
	x[1] = addr1 & 0x000000ff;
	x[2] = (addr0 & 0xff000000) >> 24;
	x[3] = (addr0 & 0x00ff0000) >> 16;
	x[4] = (addr0 & 0x0000ff00) >> 8;
	x[5] = addr0 & 0x000000ff;

	return 0;
}

static const char *netcp_node_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}

/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
	int ret;

	ret = register_netdev(netcp->ndev);
	if (!ret)
		netcp->netdev_registered = true;
	return ret;
}

static int netcp_module_probe(struct netcp_device *netcp_device,
			      struct netcp_module *module)
{
	struct device *dev = netcp_device->device;
	struct device_node *devices, *interface, *node =
dev->of_node; 242 struct device_node *child; 243 struct netcp_inst_modpriv *inst_modpriv; 244 struct netcp_intf *netcp_intf; 245 struct netcp_module *tmp; 246 bool primary_module_registered = false; 247 int ret; 248 249 /* Find this module in the sub-tree for this device */ 250 devices = of_get_child_by_name(node, "netcp-devices"); 251 if (!devices) { 252 dev_err(dev, "could not find netcp-devices node\n"); 253 return NETCP_MOD_PROBE_SKIPPED; 254 } 255 256 for_each_available_child_of_node(devices, child) { 257 const char *name = netcp_node_name(child); 258 259 if (!strcasecmp(module->name, name)) 260 break; 261 } 262 263 of_node_put(devices); 264 /* If module not used for this device, skip it */ 265 if (!child) { 266 dev_warn(dev, "module(%s) not used for device\n", module->name); 267 return NETCP_MOD_PROBE_SKIPPED; 268 } 269 270 inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL); 271 if (!inst_modpriv) { 272 of_node_put(child); 273 return -ENOMEM; 274 } 275 276 inst_modpriv->netcp_device = netcp_device; 277 inst_modpriv->netcp_module = module; 278 list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head); 279 280 ret = module->probe(netcp_device, dev, child, 281 &inst_modpriv->module_priv); 282 of_node_put(child); 283 if (ret) { 284 dev_err(dev, "Probe of module(%s) failed with %d\n", 285 module->name, ret); 286 list_del(&inst_modpriv->inst_list); 287 devm_kfree(dev, inst_modpriv); 288 return NETCP_MOD_PROBE_FAILED; 289 } 290 291 /* Attach modules only if the primary module is probed */ 292 for_each_netcp_module(tmp) { 293 if (tmp->primary) 294 primary_module_registered = true; 295 } 296 297 if (!primary_module_registered) 298 return 0; 299 300 /* Attach module to interfaces */ 301 list_for_each_entry(netcp_intf, &netcp_device->interface_head, 302 interface_list) { 303 struct netcp_intf_modpriv *intf_modpriv; 304 305 intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv), 306 GFP_KERNEL); 307 if (!intf_modpriv) 308 return -ENOMEM; 309 310 interface = of_parse_phandle(netcp_intf->node_interface, 311 module->name, 0); 312 313 if (!interface) { 314 devm_kfree(dev, intf_modpriv); 315 continue; 316 } 317 318 intf_modpriv->netcp_priv = netcp_intf; 319 intf_modpriv->netcp_module = module; 320 list_add_tail(&intf_modpriv->intf_list, 321 &netcp_intf->module_head); 322 323 ret = module->attach(inst_modpriv->module_priv, 324 netcp_intf->ndev, interface, 325 &intf_modpriv->module_priv); 326 of_node_put(interface); 327 if (ret) { 328 dev_dbg(dev, "Attach of module %s declined with %d\n", 329 module->name, ret); 330 list_del(&intf_modpriv->intf_list); 331 devm_kfree(dev, intf_modpriv); 332 continue; 333 } 334 } 335 336 /* Now register the interface with netdev */ 337 list_for_each_entry(netcp_intf, 338 &netcp_device->interface_head, 339 interface_list) { 340 /* If interface not registered then register now */ 341 if (!netcp_intf->netdev_registered) { 342 ret = netcp_register_interface(netcp_intf); 343 if (ret) 344 return -ENODEV; 345 } 346 } 347 return 0; 348 } 349 350 int netcp_register_module(struct netcp_module *module) 351 { 352 struct netcp_device *netcp_device; 353 struct netcp_module *tmp; 354 int ret; 355 356 if (!module->name) { 357 WARN(1, "error registering netcp module: no name\n"); 358 return -EINVAL; 359 } 360 361 if (!module->probe) { 362 WARN(1, "error registering netcp module: no probe\n"); 363 return -EINVAL; 364 } 365 366 mutex_lock(&netcp_modules_lock); 367 368 for_each_netcp_module(tmp) { 369 if (!strcasecmp(tmp->name, module->name)) { 370 
mutex_unlock(&netcp_modules_lock); 371 return -EEXIST; 372 } 373 } 374 list_add_tail(&module->module_list, &netcp_modules); 375 376 list_for_each_entry(netcp_device, &netcp_devices, device_list) { 377 ret = netcp_module_probe(netcp_device, module); 378 if (ret < 0) 379 goto fail; 380 } 381 mutex_unlock(&netcp_modules_lock); 382 return 0; 383 384 fail: 385 mutex_unlock(&netcp_modules_lock); 386 netcp_unregister_module(module); 387 return ret; 388 } 389 EXPORT_SYMBOL_GPL(netcp_register_module); 390 391 static void netcp_release_module(struct netcp_device *netcp_device, 392 struct netcp_module *module) 393 { 394 struct netcp_inst_modpriv *inst_modpriv, *inst_tmp; 395 struct netcp_intf *netcp_intf, *netcp_tmp; 396 struct device *dev = netcp_device->device; 397 398 /* Release the module from each interface */ 399 list_for_each_entry_safe(netcp_intf, netcp_tmp, 400 &netcp_device->interface_head, 401 interface_list) { 402 struct netcp_intf_modpriv *intf_modpriv, *intf_tmp; 403 404 list_for_each_entry_safe(intf_modpriv, intf_tmp, 405 &netcp_intf->module_head, 406 intf_list) { 407 if (intf_modpriv->netcp_module == module) { 408 module->release(intf_modpriv->module_priv); 409 list_del(&intf_modpriv->intf_list); 410 devm_kfree(dev, intf_modpriv); 411 break; 412 } 413 } 414 } 415 416 /* Remove the module from each instance */ 417 list_for_each_entry_safe(inst_modpriv, inst_tmp, 418 &netcp_device->modpriv_head, inst_list) { 419 if (inst_modpriv->netcp_module == module) { 420 module->remove(netcp_device, 421 inst_modpriv->module_priv); 422 list_del(&inst_modpriv->inst_list); 423 devm_kfree(dev, inst_modpriv); 424 break; 425 } 426 } 427 } 428 429 void netcp_unregister_module(struct netcp_module *module) 430 { 431 struct netcp_device *netcp_device; 432 struct netcp_module *module_tmp; 433 434 mutex_lock(&netcp_modules_lock); 435 436 list_for_each_entry(netcp_device, &netcp_devices, device_list) { 437 netcp_release_module(netcp_device, module); 438 } 439 440 /* Remove the module from the module list */ 441 for_each_netcp_module(module_tmp) { 442 if (module == module_tmp) { 443 list_del(&module->module_list); 444 break; 445 } 446 } 447 448 mutex_unlock(&netcp_modules_lock); 449 } 450 EXPORT_SYMBOL_GPL(netcp_unregister_module); 451 452 void *netcp_module_get_intf_data(struct netcp_module *module, 453 struct netcp_intf *intf) 454 { 455 struct netcp_intf_modpriv *intf_modpriv; 456 457 list_for_each_entry(intf_modpriv, &intf->module_head, intf_list) 458 if (intf_modpriv->netcp_module == module) 459 return intf_modpriv->module_priv; 460 return NULL; 461 } 462 EXPORT_SYMBOL_GPL(netcp_module_get_intf_data); 463 464 /* Module TX and RX Hook management */ 465 struct netcp_hook_list { 466 struct list_head list; 467 netcp_hook_rtn *hook_rtn; 468 void *hook_data; 469 int order; 470 }; 471 472 int netcp_register_txhook(struct netcp_intf *netcp_priv, int order, 473 netcp_hook_rtn *hook_rtn, void *hook_data) 474 { 475 struct netcp_hook_list *entry; 476 struct netcp_hook_list *next; 477 unsigned long flags; 478 479 entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL); 480 if (!entry) 481 return -ENOMEM; 482 483 entry->hook_rtn = hook_rtn; 484 entry->hook_data = hook_data; 485 entry->order = order; 486 487 spin_lock_irqsave(&netcp_priv->lock, flags); 488 list_for_each_entry(next, &netcp_priv->txhook_list_head, list) { 489 if (next->order > order) 490 break; 491 } 492 __list_add(&entry->list, next->list.prev, &next->list); 493 spin_unlock_irqrestore(&netcp_priv->lock, flags); 494 495 return 0; 496 } 497 
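/* Usage sketch (illustrative only, not code from this file): a NetCP module
 * such as the gbe/ethss module typically registers its transmit hook from
 * its ->open() callback and removes it again on ->close(), e.g.
 *
 *	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
 *	...
 *	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
 *
 * Hooks are kept sorted by 'order' and invoked in ascending order for each
 * packet, so lower-order hooks see the packet first.
 */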
EXPORT_SYMBOL_GPL(netcp_register_txhook); 498 499 int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order, 500 netcp_hook_rtn *hook_rtn, void *hook_data) 501 { 502 struct netcp_hook_list *next, *n; 503 unsigned long flags; 504 505 spin_lock_irqsave(&netcp_priv->lock, flags); 506 list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) { 507 if ((next->order == order) && 508 (next->hook_rtn == hook_rtn) && 509 (next->hook_data == hook_data)) { 510 list_del(&next->list); 511 spin_unlock_irqrestore(&netcp_priv->lock, flags); 512 devm_kfree(netcp_priv->dev, next); 513 return 0; 514 } 515 } 516 spin_unlock_irqrestore(&netcp_priv->lock, flags); 517 return -ENOENT; 518 } 519 EXPORT_SYMBOL_GPL(netcp_unregister_txhook); 520 521 int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order, 522 netcp_hook_rtn *hook_rtn, void *hook_data) 523 { 524 struct netcp_hook_list *entry; 525 struct netcp_hook_list *next; 526 unsigned long flags; 527 528 entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL); 529 if (!entry) 530 return -ENOMEM; 531 532 entry->hook_rtn = hook_rtn; 533 entry->hook_data = hook_data; 534 entry->order = order; 535 536 spin_lock_irqsave(&netcp_priv->lock, flags); 537 list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) { 538 if (next->order > order) 539 break; 540 } 541 __list_add(&entry->list, next->list.prev, &next->list); 542 spin_unlock_irqrestore(&netcp_priv->lock, flags); 543 544 return 0; 545 } 546 547 int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order, 548 netcp_hook_rtn *hook_rtn, void *hook_data) 549 { 550 struct netcp_hook_list *next, *n; 551 unsigned long flags; 552 553 spin_lock_irqsave(&netcp_priv->lock, flags); 554 list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) { 555 if ((next->order == order) && 556 (next->hook_rtn == hook_rtn) && 557 (next->hook_data == hook_data)) { 558 list_del(&next->list); 559 spin_unlock_irqrestore(&netcp_priv->lock, flags); 560 devm_kfree(netcp_priv->dev, next); 561 return 0; 562 } 563 } 564 spin_unlock_irqrestore(&netcp_priv->lock, flags); 565 566 return -ENOENT; 567 } 568 569 static void netcp_frag_free(bool is_frag, void *ptr) 570 { 571 if (is_frag) 572 skb_free_frag(ptr); 573 else 574 kfree(ptr); 575 } 576 577 static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, 578 struct knav_dma_desc *desc) 579 { 580 struct knav_dma_desc *ndesc; 581 dma_addr_t dma_desc, dma_buf; 582 unsigned int buf_len, dma_sz = sizeof(*ndesc); 583 void *buf_ptr; 584 u32 pad[2]; 585 u32 tmp; 586 587 get_words(&dma_desc, 1, &desc->next_desc); 588 589 while (dma_desc) { 590 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); 591 if (unlikely(!ndesc)) { 592 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); 593 break; 594 } 595 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); 596 get_pad_ptr(&buf_ptr, ndesc); 597 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); 598 __free_page(buf_ptr); 599 knav_pool_desc_put(netcp->rx_pool, desc); 600 } 601 602 get_pad_info(&pad[0], &pad[1], &buf_len, desc); 603 buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 604 605 if (buf_ptr) 606 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); 607 knav_pool_desc_put(netcp->rx_pool, desc); 608 } 609 610 static void netcp_empty_rx_queue(struct netcp_intf *netcp) 611 { 612 struct knav_dma_desc *desc; 613 unsigned int dma_sz; 614 dma_addr_t dma; 615 616 for (; ;) { 617 dma = knav_queue_pop(netcp->rx_queue, &dma_sz); 618 if (!dma) 619 break; 620 621 desc = 
knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz); 622 if (unlikely(!desc)) { 623 dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n", 624 __func__); 625 netcp->ndev->stats.rx_errors++; 626 continue; 627 } 628 netcp_free_rx_desc_chain(netcp, desc); 629 netcp->ndev->stats.rx_dropped++; 630 } 631 } 632 633 static int netcp_process_one_rx_packet(struct netcp_intf *netcp) 634 { 635 unsigned int dma_sz, buf_len, org_buf_len; 636 struct knav_dma_desc *desc, *ndesc; 637 unsigned int pkt_sz = 0, accum_sz; 638 struct netcp_hook_list *rx_hook; 639 dma_addr_t dma_desc, dma_buff; 640 struct netcp_packet p_info; 641 struct sk_buff *skb; 642 u32 pad[2]; 643 void *org_buf_ptr; 644 645 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); 646 if (!dma_desc) 647 return -1; 648 649 desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); 650 if (unlikely(!desc)) { 651 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); 652 return 0; 653 } 654 655 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); 656 get_pad_info(&pad[0], &pad[1], &org_buf_len, desc); 657 org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 658 659 if (unlikely(!org_buf_ptr)) { 660 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); 661 goto free_desc; 662 } 663 664 pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK; 665 accum_sz = buf_len; 666 dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE); 667 668 /* Build a new sk_buff for the primary buffer */ 669 skb = build_skb(org_buf_ptr, org_buf_len); 670 if (unlikely(!skb)) { 671 dev_err(netcp->ndev_dev, "build_skb() failed\n"); 672 goto free_desc; 673 } 674 675 /* update data, tail and len */ 676 skb_reserve(skb, NETCP_SOP_OFFSET); 677 __skb_put(skb, buf_len); 678 679 /* Fill in the page fragment list */ 680 while (dma_desc) { 681 struct page *page; 682 void *ptr; 683 684 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); 685 if (unlikely(!ndesc)) { 686 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); 687 goto free_desc; 688 } 689 690 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); 691 get_pad_ptr(&ptr, ndesc); 692 page = ptr; 693 694 if (likely(dma_buff && buf_len && page)) { 695 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, 696 DMA_FROM_DEVICE); 697 } else { 698 dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n", 699 &dma_buff, buf_len, page); 700 goto free_desc; 701 } 702 703 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 704 offset_in_page(dma_buff), buf_len, PAGE_SIZE); 705 accum_sz += buf_len; 706 707 /* Free the descriptor */ 708 knav_pool_desc_put(netcp->rx_pool, ndesc); 709 } 710 711 /* Free the primary descriptor */ 712 knav_pool_desc_put(netcp->rx_pool, desc); 713 714 /* check for packet len and warn */ 715 if (unlikely(pkt_sz != accum_sz)) 716 dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n", 717 pkt_sz, accum_sz); 718 719 /* Remove ethernet FCS from the packet */ 720 __pskb_trim(skb, skb->len - ETH_FCS_LEN); 721 722 /* Call each of the RX hooks */ 723 p_info.skb = skb; 724 p_info.rxtstamp_complete = false; 725 list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) { 726 int ret; 727 728 ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data, 729 &p_info); 730 if (unlikely(ret)) { 731 dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n", 732 rx_hook->order, ret); 733 netcp->ndev->stats.rx_errors++; 734 dev_kfree_skb(skb); 735 return 0; 736 } 737 } 738 739 netcp->ndev->stats.rx_packets++; 740 netcp->ndev->stats.rx_bytes += skb->len; 741 742 /* push skb up the 
stack */ 743 skb->protocol = eth_type_trans(skb, netcp->ndev); 744 netif_receive_skb(skb); 745 return 0; 746 747 free_desc: 748 netcp_free_rx_desc_chain(netcp, desc); 749 netcp->ndev->stats.rx_errors++; 750 return 0; 751 } 752 753 static int netcp_process_rx_packets(struct netcp_intf *netcp, 754 unsigned int budget) 755 { 756 int i; 757 758 for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++) 759 ; 760 return i; 761 } 762 763 /* Release descriptors and attached buffers from Rx FDQ */ 764 static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq) 765 { 766 struct knav_dma_desc *desc; 767 unsigned int buf_len, dma_sz; 768 dma_addr_t dma; 769 void *buf_ptr; 770 771 /* Allocate descriptor */ 772 while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) { 773 desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz); 774 if (unlikely(!desc)) { 775 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); 776 continue; 777 } 778 779 get_org_pkt_info(&dma, &buf_len, desc); 780 get_pad_ptr(&buf_ptr, desc); 781 782 if (unlikely(!dma)) { 783 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n"); 784 knav_pool_desc_put(netcp->rx_pool, desc); 785 continue; 786 } 787 788 if (unlikely(!buf_ptr)) { 789 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); 790 knav_pool_desc_put(netcp->rx_pool, desc); 791 continue; 792 } 793 794 if (fdq == 0) { 795 dma_unmap_single(netcp->dev, dma, buf_len, 796 DMA_FROM_DEVICE); 797 netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr); 798 } else { 799 dma_unmap_page(netcp->dev, dma, buf_len, 800 DMA_FROM_DEVICE); 801 __free_page(buf_ptr); 802 } 803 804 knav_pool_desc_put(netcp->rx_pool, desc); 805 } 806 } 807 808 static void netcp_rxpool_free(struct netcp_intf *netcp) 809 { 810 int i; 811 812 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && 813 !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++) 814 netcp_free_rx_buf(netcp, i); 815 816 if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size) 817 dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n", 818 netcp->rx_pool_size - knav_pool_count(netcp->rx_pool)); 819 820 knav_pool_destroy(netcp->rx_pool); 821 netcp->rx_pool = NULL; 822 } 823 824 static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) 825 { 826 struct knav_dma_desc *hwdesc; 827 unsigned int buf_len, dma_sz; 828 u32 desc_info, pkt_info; 829 struct page *page; 830 dma_addr_t dma; 831 void *bufptr; 832 u32 pad[3]; 833 834 /* Allocate descriptor */ 835 hwdesc = knav_pool_desc_get(netcp->rx_pool); 836 if (IS_ERR_OR_NULL(hwdesc)) { 837 dev_dbg(netcp->ndev_dev, "out of rx pool desc\n"); 838 return -ENOMEM; 839 } 840 841 if (likely(fdq == 0)) { 842 unsigned int primary_buf_len; 843 /* Allocate a primary receive queue entry */ 844 buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET; 845 primary_buf_len = SKB_DATA_ALIGN(buf_len) + 846 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 847 848 bufptr = netdev_alloc_frag(primary_buf_len); 849 pad[2] = primary_buf_len; 850 851 if (unlikely(!bufptr)) { 852 dev_warn_ratelimited(netcp->ndev_dev, 853 "Primary RX buffer alloc failed\n"); 854 goto fail; 855 } 856 dma = dma_map_single(netcp->dev, bufptr, buf_len, 857 DMA_TO_DEVICE); 858 if (unlikely(dma_mapping_error(netcp->dev, dma))) 859 goto fail; 860 861 pad[0] = lower_32_bits((uintptr_t)bufptr); 862 pad[1] = upper_32_bits((uintptr_t)bufptr); 863 864 } else { 865 /* Allocate a secondary receive queue entry */ 866 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); 867 if (unlikely(!page)) { 868 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n"); 869 goto 
fail; 870 } 871 buf_len = PAGE_SIZE; 872 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE); 873 pad[0] = lower_32_bits(dma); 874 pad[1] = upper_32_bits(dma); 875 pad[2] = 0; 876 } 877 878 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC; 879 desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK; 880 pkt_info = KNAV_DMA_DESC_HAS_EPIB; 881 pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT; 882 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) << 883 KNAV_DMA_DESC_RETQ_SHIFT; 884 set_org_pkt_info(dma, buf_len, hwdesc); 885 set_pad_info(pad[0], pad[1], pad[2], hwdesc); 886 set_desc_info(desc_info, pkt_info, hwdesc); 887 888 /* Push to FDQs */ 889 knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma, 890 &dma_sz); 891 knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0); 892 return 0; 893 894 fail: 895 knav_pool_desc_put(netcp->rx_pool, hwdesc); 896 return -ENOMEM; 897 } 898 899 /* Refill Rx FDQ with descriptors & attached buffers */ 900 static void netcp_rxpool_refill(struct netcp_intf *netcp) 901 { 902 u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0}; 903 int i, ret = 0; 904 905 /* Calculate the FDQ deficit and refill */ 906 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) { 907 fdq_deficit[i] = netcp->rx_queue_depths[i] - 908 knav_queue_get_count(netcp->rx_fdq[i]); 909 910 while (fdq_deficit[i]-- && !ret) 911 ret = netcp_allocate_rx_buf(netcp, i); 912 } /* end for fdqs */ 913 } 914 915 /* NAPI poll */ 916 static int netcp_rx_poll(struct napi_struct *napi, int budget) 917 { 918 struct netcp_intf *netcp = container_of(napi, struct netcp_intf, 919 rx_napi); 920 unsigned int packets; 921 922 packets = netcp_process_rx_packets(netcp, budget); 923 924 netcp_rxpool_refill(netcp); 925 if (packets < budget) { 926 napi_complete(&netcp->rx_napi); 927 knav_queue_enable_notify(netcp->rx_queue); 928 } 929 930 return packets; 931 } 932 933 static void netcp_rx_notify(void *arg) 934 { 935 struct netcp_intf *netcp = arg; 936 937 knav_queue_disable_notify(netcp->rx_queue); 938 napi_schedule(&netcp->rx_napi); 939 } 940 941 static void netcp_free_tx_desc_chain(struct netcp_intf *netcp, 942 struct knav_dma_desc *desc, 943 unsigned int desc_sz) 944 { 945 struct knav_dma_desc *ndesc = desc; 946 dma_addr_t dma_desc, dma_buf; 947 unsigned int buf_len; 948 949 while (ndesc) { 950 get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc); 951 952 if (dma_buf && buf_len) 953 dma_unmap_single(netcp->dev, dma_buf, buf_len, 954 DMA_TO_DEVICE); 955 else 956 dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n", 957 &dma_buf, buf_len); 958 959 knav_pool_desc_put(netcp->tx_pool, ndesc); 960 ndesc = NULL; 961 if (dma_desc) { 962 ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc, 963 desc_sz); 964 if (!ndesc) 965 dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n"); 966 } 967 } 968 } 969 970 static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, 971 unsigned int budget) 972 { 973 struct knav_dma_desc *desc; 974 void *ptr; 975 struct sk_buff *skb; 976 unsigned int dma_sz; 977 dma_addr_t dma; 978 int pkts = 0; 979 980 while (budget--) { 981 dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz); 982 if (!dma) 983 break; 984 desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz); 985 if (unlikely(!desc)) { 986 dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n"); 987 netcp->ndev->stats.tx_errors++; 988 continue; 989 } 990 991 get_pad_ptr(&ptr, desc); 992 skb = ptr; 993 netcp_free_tx_desc_chain(netcp, desc, dma_sz); 994 if (!skb) { 995 dev_err(netcp->ndev_dev, 
"No skb in Tx desc\n"); 996 netcp->ndev->stats.tx_errors++; 997 continue; 998 } 999 1000 if (netif_subqueue_stopped(netcp->ndev, skb) && 1001 netif_running(netcp->ndev) && 1002 (knav_pool_count(netcp->tx_pool) > 1003 netcp->tx_resume_threshold)) { 1004 u16 subqueue = skb_get_queue_mapping(skb); 1005 1006 netif_wake_subqueue(netcp->ndev, subqueue); 1007 } 1008 1009 netcp->ndev->stats.tx_packets++; 1010 netcp->ndev->stats.tx_bytes += skb->len; 1011 dev_kfree_skb(skb); 1012 pkts++; 1013 } 1014 return pkts; 1015 } 1016 1017 static int netcp_tx_poll(struct napi_struct *napi, int budget) 1018 { 1019 int packets; 1020 struct netcp_intf *netcp = container_of(napi, struct netcp_intf, 1021 tx_napi); 1022 1023 packets = netcp_process_tx_compl_packets(netcp, budget); 1024 if (packets < budget) { 1025 napi_complete(&netcp->tx_napi); 1026 knav_queue_enable_notify(netcp->tx_compl_q); 1027 } 1028 1029 return packets; 1030 } 1031 1032 static void netcp_tx_notify(void *arg) 1033 { 1034 struct netcp_intf *netcp = arg; 1035 1036 knav_queue_disable_notify(netcp->tx_compl_q); 1037 napi_schedule(&netcp->tx_napi); 1038 } 1039 1040 static struct knav_dma_desc* 1041 netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) 1042 { 1043 struct knav_dma_desc *desc, *ndesc, *pdesc; 1044 unsigned int pkt_len = skb_headlen(skb); 1045 struct device *dev = netcp->dev; 1046 dma_addr_t dma_addr; 1047 unsigned int dma_sz; 1048 int i; 1049 1050 /* Map the linear buffer */ 1051 dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE); 1052 if (unlikely(dma_mapping_error(dev, dma_addr))) { 1053 dev_err(netcp->ndev_dev, "Failed to map skb buffer\n"); 1054 return NULL; 1055 } 1056 1057 desc = knav_pool_desc_get(netcp->tx_pool); 1058 if (IS_ERR_OR_NULL(desc)) { 1059 dev_err(netcp->ndev_dev, "out of TX desc\n"); 1060 dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE); 1061 return NULL; 1062 } 1063 1064 set_pkt_info(dma_addr, pkt_len, 0, desc); 1065 if (skb_is_nonlinear(skb)) { 1066 prefetchw(skb_shinfo(skb)); 1067 } else { 1068 desc->next_desc = 0; 1069 goto upd_pkt_len; 1070 } 1071 1072 pdesc = desc; 1073 1074 /* Handle the case where skb is fragmented in pages */ 1075 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1076 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1077 struct page *page = skb_frag_page(frag); 1078 u32 page_offset = frag->page_offset; 1079 u32 buf_len = skb_frag_size(frag); 1080 dma_addr_t desc_dma; 1081 u32 desc_dma_32; 1082 u32 pkt_info; 1083 1084 dma_addr = dma_map_page(dev, page, page_offset, buf_len, 1085 DMA_TO_DEVICE); 1086 if (unlikely(!dma_addr)) { 1087 dev_err(netcp->ndev_dev, "Failed to map skb page\n"); 1088 goto free_descs; 1089 } 1090 1091 ndesc = knav_pool_desc_get(netcp->tx_pool); 1092 if (IS_ERR_OR_NULL(ndesc)) { 1093 dev_err(netcp->ndev_dev, "out of TX desc for frags\n"); 1094 dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE); 1095 goto free_descs; 1096 } 1097 1098 desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc); 1099 pkt_info = 1100 (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) << 1101 KNAV_DMA_DESC_RETQ_SHIFT; 1102 set_pkt_info(dma_addr, buf_len, 0, ndesc); 1103 desc_dma_32 = (u32)desc_dma; 1104 set_words(&desc_dma_32, 1, &pdesc->next_desc); 1105 pkt_len += buf_len; 1106 if (pdesc != desc) 1107 knav_pool_desc_map(netcp->tx_pool, pdesc, 1108 sizeof(*pdesc), &desc_dma, &dma_sz); 1109 pdesc = ndesc; 1110 } 1111 if (pdesc != desc) 1112 knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc), 1113 &dma_addr, &dma_sz); 1114 1115 /* frag list based linkage is 
not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}

static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	unsigned int dma_sz;
	dma_addr_t dma;
	u32 tmp = 0;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp_complete = NULL;
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	/* update descriptor */
	if (p_info.psdata_len) {
		/* psdata points to both native-endian and device-endian data */
		__le32 *psdata = (void __force *)p_info.psdata;

		memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
			p_info.psdata_len);
		set_words(p_info.psdata, p_info.psdata_len, psdata);
		tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	tmp |= KNAV_DMA_DESC_HAS_EPIB |
		((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
		KNAV_DMA_DESC_RETQ_SHIFT);

	if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
		tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
			KNAV_DMA_DESC_PSFLAG_SHIFT);
	}

	set_words(&tmp, 1, &desc->packet_info);
	/* Stash the skb virtual pointer in the descriptor pad words so it
	 * can be recovered via get_pad_ptr() when the Tx completion is
	 * processed.
	 */
	tmp = lower_32_bits((uintptr_t)skb);
	set_words(&tmp, 1, &desc->pad[0]);
	tmp = upper_32_bits((uintptr_t)skb);
	set_words(&tmp, 1, &desc->pad[1]);

	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
		tmp = tx_pipe->switch_to_port;
		set_words(&tmp, 1, &desc->tag_info);
	}

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
	return ret;
}

/* Submit the packet */
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	int subqueue = skb_get_queue_mapping(skb);
	struct knav_dma_desc *desc;
	int desc_count, ret = 0;
if (unlikely(skb->len <= 0)) { 1231 dev_kfree_skb(skb); 1232 return NETDEV_TX_OK; 1233 } 1234 1235 if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) { 1236 ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE); 1237 if (ret < 0) { 1238 /* If we get here, the skb has already been dropped */ 1239 dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n", 1240 ret); 1241 ndev->stats.tx_dropped++; 1242 return ret; 1243 } 1244 skb->len = NETCP_MIN_PACKET_SIZE; 1245 } 1246 1247 desc = netcp_tx_map_skb(skb, netcp); 1248 if (unlikely(!desc)) { 1249 netif_stop_subqueue(ndev, subqueue); 1250 ret = -ENOBUFS; 1251 goto drop; 1252 } 1253 1254 ret = netcp_tx_submit_skb(netcp, skb, desc); 1255 if (ret) 1256 goto drop; 1257 1258 ndev->trans_start = jiffies; 1259 1260 /* Check Tx pool count & stop subqueue if needed */ 1261 desc_count = knav_pool_count(netcp->tx_pool); 1262 if (desc_count < netcp->tx_pause_threshold) { 1263 dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count); 1264 netif_stop_subqueue(ndev, subqueue); 1265 } 1266 return NETDEV_TX_OK; 1267 1268 drop: 1269 ndev->stats.tx_dropped++; 1270 if (desc) 1271 netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc)); 1272 dev_kfree_skb(skb); 1273 return ret; 1274 } 1275 1276 int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe) 1277 { 1278 if (tx_pipe->dma_channel) { 1279 knav_dma_close_channel(tx_pipe->dma_channel); 1280 tx_pipe->dma_channel = NULL; 1281 } 1282 return 0; 1283 } 1284 EXPORT_SYMBOL_GPL(netcp_txpipe_close); 1285 1286 int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe) 1287 { 1288 struct device *dev = tx_pipe->netcp_device->device; 1289 struct knav_dma_cfg config; 1290 int ret = 0; 1291 u8 name[16]; 1292 1293 memset(&config, 0, sizeof(config)); 1294 config.direction = DMA_MEM_TO_DEV; 1295 config.u.tx.filt_einfo = false; 1296 config.u.tx.filt_pswords = false; 1297 config.u.tx.priority = DMA_PRIO_MED_L; 1298 1299 tx_pipe->dma_channel = knav_dma_open_channel(dev, 1300 tx_pipe->dma_chan_name, &config); 1301 if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) { 1302 dev_err(dev, "failed opening tx chan(%s)\n", 1303 tx_pipe->dma_chan_name); 1304 goto err; 1305 } 1306 1307 snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev)); 1308 tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id, 1309 KNAV_QUEUE_SHARED); 1310 if (IS_ERR(tx_pipe->dma_queue)) { 1311 dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n", 1312 name, ret); 1313 ret = PTR_ERR(tx_pipe->dma_queue); 1314 goto err; 1315 } 1316 1317 dev_dbg(dev, "opened tx pipe %s\n", name); 1318 return 0; 1319 1320 err: 1321 if (!IS_ERR_OR_NULL(tx_pipe->dma_channel)) 1322 knav_dma_close_channel(tx_pipe->dma_channel); 1323 tx_pipe->dma_channel = NULL; 1324 return ret; 1325 } 1326 EXPORT_SYMBOL_GPL(netcp_txpipe_open); 1327 1328 int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe, 1329 struct netcp_device *netcp_device, 1330 const char *dma_chan_name, unsigned int dma_queue_id) 1331 { 1332 memset(tx_pipe, 0, sizeof(*tx_pipe)); 1333 tx_pipe->netcp_device = netcp_device; 1334 tx_pipe->dma_chan_name = dma_chan_name; 1335 tx_pipe->dma_queue_id = dma_queue_id; 1336 return 0; 1337 } 1338 EXPORT_SYMBOL_GPL(netcp_txpipe_init); 1339 1340 static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp, 1341 const u8 *addr, 1342 enum netcp_addr_type type) 1343 { 1344 struct netcp_addr *naddr; 1345 1346 list_for_each_entry(naddr, &netcp->addr_list, node) { 1347 if (naddr->type != type) 1348 continue; 1349 if (addr && memcmp(addr, naddr->addr, ETH_ALEN)) 1350 continue; 1351 return naddr; 
1352 } 1353 1354 return NULL; 1355 } 1356 1357 static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp, 1358 const u8 *addr, 1359 enum netcp_addr_type type) 1360 { 1361 struct netcp_addr *naddr; 1362 1363 naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC); 1364 if (!naddr) 1365 return NULL; 1366 1367 naddr->type = type; 1368 naddr->flags = 0; 1369 naddr->netcp = netcp; 1370 if (addr) 1371 ether_addr_copy(naddr->addr, addr); 1372 else 1373 eth_zero_addr(naddr->addr); 1374 list_add_tail(&naddr->node, &netcp->addr_list); 1375 1376 return naddr; 1377 } 1378 1379 static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr) 1380 { 1381 list_del(&naddr->node); 1382 devm_kfree(netcp->dev, naddr); 1383 } 1384 1385 static void netcp_addr_clear_mark(struct netcp_intf *netcp) 1386 { 1387 struct netcp_addr *naddr; 1388 1389 list_for_each_entry(naddr, &netcp->addr_list, node) 1390 naddr->flags = 0; 1391 } 1392 1393 static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr, 1394 enum netcp_addr_type type) 1395 { 1396 struct netcp_addr *naddr; 1397 1398 naddr = netcp_addr_find(netcp, addr, type); 1399 if (naddr) { 1400 naddr->flags |= ADDR_VALID; 1401 return; 1402 } 1403 1404 naddr = netcp_addr_add(netcp, addr, type); 1405 if (!WARN_ON(!naddr)) 1406 naddr->flags |= ADDR_NEW; 1407 } 1408 1409 static void netcp_addr_sweep_del(struct netcp_intf *netcp) 1410 { 1411 struct netcp_addr *naddr, *tmp; 1412 struct netcp_intf_modpriv *priv; 1413 struct netcp_module *module; 1414 int error; 1415 1416 list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) { 1417 if (naddr->flags & (ADDR_VALID | ADDR_NEW)) 1418 continue; 1419 dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n", 1420 naddr->addr, naddr->type); 1421 for_each_module(netcp, priv) { 1422 module = priv->netcp_module; 1423 if (!module->del_addr) 1424 continue; 1425 error = module->del_addr(priv->module_priv, 1426 naddr); 1427 WARN_ON(error); 1428 } 1429 netcp_addr_del(netcp, naddr); 1430 } 1431 } 1432 1433 static void netcp_addr_sweep_add(struct netcp_intf *netcp) 1434 { 1435 struct netcp_addr *naddr, *tmp; 1436 struct netcp_intf_modpriv *priv; 1437 struct netcp_module *module; 1438 int error; 1439 1440 list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) { 1441 if (!(naddr->flags & ADDR_NEW)) 1442 continue; 1443 dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n", 1444 naddr->addr, naddr->type); 1445 1446 for_each_module(netcp, priv) { 1447 module = priv->netcp_module; 1448 if (!module->add_addr) 1449 continue; 1450 error = module->add_addr(priv->module_priv, naddr); 1451 WARN_ON(error); 1452 } 1453 } 1454 } 1455 1456 static void netcp_set_rx_mode(struct net_device *ndev) 1457 { 1458 struct netcp_intf *netcp = netdev_priv(ndev); 1459 struct netdev_hw_addr *ndev_addr; 1460 bool promisc; 1461 1462 promisc = (ndev->flags & IFF_PROMISC || 1463 ndev->flags & IFF_ALLMULTI || 1464 netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR); 1465 1466 spin_lock(&netcp->lock); 1467 /* first clear all marks */ 1468 netcp_addr_clear_mark(netcp); 1469 1470 /* next add new entries, mark existing ones */ 1471 netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST); 1472 for_each_dev_addr(ndev, ndev_addr) 1473 netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV); 1474 netdev_for_each_uc_addr(ndev_addr, ndev) 1475 netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST); 1476 netdev_for_each_mc_addr(ndev_addr, ndev) 1477 netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST); 1478 1479 if (promisc) 1480 
netcp_addr_add_mark(netcp, NULL, ADDR_ANY); 1481 1482 /* finally sweep and callout into modules */ 1483 netcp_addr_sweep_del(netcp); 1484 netcp_addr_sweep_add(netcp); 1485 spin_unlock(&netcp->lock); 1486 } 1487 1488 static void netcp_free_navigator_resources(struct netcp_intf *netcp) 1489 { 1490 int i; 1491 1492 if (netcp->rx_channel) { 1493 knav_dma_close_channel(netcp->rx_channel); 1494 netcp->rx_channel = NULL; 1495 } 1496 1497 if (!IS_ERR_OR_NULL(netcp->rx_pool)) 1498 netcp_rxpool_free(netcp); 1499 1500 if (!IS_ERR_OR_NULL(netcp->rx_queue)) { 1501 knav_queue_close(netcp->rx_queue); 1502 netcp->rx_queue = NULL; 1503 } 1504 1505 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && 1506 !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) { 1507 knav_queue_close(netcp->rx_fdq[i]); 1508 netcp->rx_fdq[i] = NULL; 1509 } 1510 1511 if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) { 1512 knav_queue_close(netcp->tx_compl_q); 1513 netcp->tx_compl_q = NULL; 1514 } 1515 1516 if (!IS_ERR_OR_NULL(netcp->tx_pool)) { 1517 knav_pool_destroy(netcp->tx_pool); 1518 netcp->tx_pool = NULL; 1519 } 1520 } 1521 1522 static int netcp_setup_navigator_resources(struct net_device *ndev) 1523 { 1524 struct netcp_intf *netcp = netdev_priv(ndev); 1525 struct knav_queue_notify_config notify_cfg; 1526 struct knav_dma_cfg config; 1527 u32 last_fdq = 0; 1528 u8 name[16]; 1529 int ret; 1530 int i; 1531 1532 /* Create Rx/Tx descriptor pools */ 1533 snprintf(name, sizeof(name), "rx-pool-%s", ndev->name); 1534 netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size, 1535 netcp->rx_pool_region_id); 1536 if (IS_ERR_OR_NULL(netcp->rx_pool)) { 1537 dev_err(netcp->ndev_dev, "Couldn't create rx pool\n"); 1538 ret = PTR_ERR(netcp->rx_pool); 1539 goto fail; 1540 } 1541 1542 snprintf(name, sizeof(name), "tx-pool-%s", ndev->name); 1543 netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size, 1544 netcp->tx_pool_region_id); 1545 if (IS_ERR_OR_NULL(netcp->tx_pool)) { 1546 dev_err(netcp->ndev_dev, "Couldn't create tx pool\n"); 1547 ret = PTR_ERR(netcp->tx_pool); 1548 goto fail; 1549 } 1550 1551 /* open Tx completion queue */ 1552 snprintf(name, sizeof(name), "tx-compl-%s", ndev->name); 1553 netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0); 1554 if (IS_ERR_OR_NULL(netcp->tx_compl_q)) { 1555 ret = PTR_ERR(netcp->tx_compl_q); 1556 goto fail; 1557 } 1558 netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q); 1559 1560 /* Set notification for Tx completion */ 1561 notify_cfg.fn = netcp_tx_notify; 1562 notify_cfg.fn_arg = netcp; 1563 ret = knav_queue_device_control(netcp->tx_compl_q, 1564 KNAV_QUEUE_SET_NOTIFIER, 1565 (unsigned long)¬ify_cfg); 1566 if (ret) 1567 goto fail; 1568 1569 knav_queue_disable_notify(netcp->tx_compl_q); 1570 1571 /* open Rx completion queue */ 1572 snprintf(name, sizeof(name), "rx-compl-%s", ndev->name); 1573 netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0); 1574 if (IS_ERR_OR_NULL(netcp->rx_queue)) { 1575 ret = PTR_ERR(netcp->rx_queue); 1576 goto fail; 1577 } 1578 netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue); 1579 1580 /* Set notification for Rx completion */ 1581 notify_cfg.fn = netcp_rx_notify; 1582 notify_cfg.fn_arg = netcp; 1583 ret = knav_queue_device_control(netcp->rx_queue, 1584 KNAV_QUEUE_SET_NOTIFIER, 1585 (unsigned long)¬ify_cfg); 1586 if (ret) 1587 goto fail; 1588 1589 knav_queue_disable_notify(netcp->rx_queue); 1590 1591 /* open Rx FDQs */ 1592 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i]; 1593 ++i) { 1594 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, 
i); 1595 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0); 1596 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) { 1597 ret = PTR_ERR(netcp->rx_fdq[i]); 1598 goto fail; 1599 } 1600 } 1601 1602 memset(&config, 0, sizeof(config)); 1603 config.direction = DMA_DEV_TO_MEM; 1604 config.u.rx.einfo_present = true; 1605 config.u.rx.psinfo_present = true; 1606 config.u.rx.err_mode = DMA_DROP; 1607 config.u.rx.desc_type = DMA_DESC_HOST; 1608 config.u.rx.psinfo_at_sop = false; 1609 config.u.rx.sop_offset = NETCP_SOP_OFFSET; 1610 config.u.rx.dst_q = netcp->rx_queue_id; 1611 config.u.rx.thresh = DMA_THRESH_NONE; 1612 1613 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) { 1614 if (netcp->rx_fdq[i]) 1615 last_fdq = knav_queue_get_id(netcp->rx_fdq[i]); 1616 config.u.rx.fdq[i] = last_fdq; 1617 } 1618 1619 netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device, 1620 netcp->dma_chan_name, &config); 1621 if (IS_ERR_OR_NULL(netcp->rx_channel)) { 1622 dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n", 1623 netcp->dma_chan_name); 1624 goto fail; 1625 } 1626 1627 dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel); 1628 return 0; 1629 1630 fail: 1631 netcp_free_navigator_resources(netcp); 1632 return ret; 1633 } 1634 1635 /* Open the device */ 1636 static int netcp_ndo_open(struct net_device *ndev) 1637 { 1638 struct netcp_intf *netcp = netdev_priv(ndev); 1639 struct netcp_intf_modpriv *intf_modpriv; 1640 struct netcp_module *module; 1641 int ret; 1642 1643 netif_carrier_off(ndev); 1644 ret = netcp_setup_navigator_resources(ndev); 1645 if (ret) { 1646 dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n"); 1647 goto fail; 1648 } 1649 1650 for_each_module(netcp, intf_modpriv) { 1651 module = intf_modpriv->netcp_module; 1652 if (module->open) { 1653 ret = module->open(intf_modpriv->module_priv, ndev); 1654 if (ret != 0) { 1655 dev_err(netcp->ndev_dev, "module open failed\n"); 1656 goto fail_open; 1657 } 1658 } 1659 } 1660 1661 napi_enable(&netcp->rx_napi); 1662 napi_enable(&netcp->tx_napi); 1663 knav_queue_enable_notify(netcp->tx_compl_q); 1664 knav_queue_enable_notify(netcp->rx_queue); 1665 netcp_rxpool_refill(netcp); 1666 netif_tx_wake_all_queues(ndev); 1667 dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name); 1668 return 0; 1669 1670 fail_open: 1671 for_each_module(netcp, intf_modpriv) { 1672 module = intf_modpriv->netcp_module; 1673 if (module->close) 1674 module->close(intf_modpriv->module_priv, ndev); 1675 } 1676 1677 fail: 1678 netcp_free_navigator_resources(netcp); 1679 return ret; 1680 } 1681 1682 /* Close the device */ 1683 static int netcp_ndo_stop(struct net_device *ndev) 1684 { 1685 struct netcp_intf *netcp = netdev_priv(ndev); 1686 struct netcp_intf_modpriv *intf_modpriv; 1687 struct netcp_module *module; 1688 int err = 0; 1689 1690 netif_tx_stop_all_queues(ndev); 1691 netif_carrier_off(ndev); 1692 netcp_addr_clear_mark(netcp); 1693 netcp_addr_sweep_del(netcp); 1694 knav_queue_disable_notify(netcp->rx_queue); 1695 knav_queue_disable_notify(netcp->tx_compl_q); 1696 napi_disable(&netcp->rx_napi); 1697 napi_disable(&netcp->tx_napi); 1698 1699 for_each_module(netcp, intf_modpriv) { 1700 module = intf_modpriv->netcp_module; 1701 if (module->close) { 1702 err = module->close(intf_modpriv->module_priv, ndev); 1703 if (err != 0) 1704 dev_err(netcp->ndev_dev, "Close failed\n"); 1705 } 1706 } 1707 1708 /* Recycle Rx descriptors from completion queue */ 1709 netcp_empty_rx_queue(netcp); 1710 1711 /* Recycle Tx descriptors from completion queue */ 1712 
netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size); 1713 1714 if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size) 1715 dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n", 1716 netcp->tx_pool_size - knav_pool_count(netcp->tx_pool)); 1717 1718 netcp_free_navigator_resources(netcp); 1719 dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name); 1720 return 0; 1721 } 1722 1723 static int netcp_ndo_ioctl(struct net_device *ndev, 1724 struct ifreq *req, int cmd) 1725 { 1726 struct netcp_intf *netcp = netdev_priv(ndev); 1727 struct netcp_intf_modpriv *intf_modpriv; 1728 struct netcp_module *module; 1729 int ret = -1, err = -EOPNOTSUPP; 1730 1731 if (!netif_running(ndev)) 1732 return -EINVAL; 1733 1734 for_each_module(netcp, intf_modpriv) { 1735 module = intf_modpriv->netcp_module; 1736 if (!module->ioctl) 1737 continue; 1738 1739 err = module->ioctl(intf_modpriv->module_priv, req, cmd); 1740 if ((err < 0) && (err != -EOPNOTSUPP)) { 1741 ret = err; 1742 goto out; 1743 } 1744 if (err == 0) 1745 ret = err; 1746 } 1747 1748 out: 1749 return (ret == 0) ? 0 : err; 1750 } 1751 1752 static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu) 1753 { 1754 struct netcp_intf *netcp = netdev_priv(ndev); 1755 1756 /* MTU < 68 is an error for IPv4 traffic */ 1757 if ((new_mtu < 68) || 1758 (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) { 1759 dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu); 1760 return -EINVAL; 1761 } 1762 1763 ndev->mtu = new_mtu; 1764 return 0; 1765 } 1766 1767 static void netcp_ndo_tx_timeout(struct net_device *ndev) 1768 { 1769 struct netcp_intf *netcp = netdev_priv(ndev); 1770 unsigned int descs = knav_pool_count(netcp->tx_pool); 1771 1772 dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs); 1773 netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size); 1774 ndev->trans_start = jiffies; 1775 netif_tx_wake_all_queues(ndev); 1776 } 1777 1778 static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) 1779 { 1780 struct netcp_intf *netcp = netdev_priv(ndev); 1781 struct netcp_intf_modpriv *intf_modpriv; 1782 struct netcp_module *module; 1783 unsigned long flags; 1784 int err = 0; 1785 1786 dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid); 1787 1788 spin_lock_irqsave(&netcp->lock, flags); 1789 for_each_module(netcp, intf_modpriv) { 1790 module = intf_modpriv->netcp_module; 1791 if ((module->add_vid) && (vid != 0)) { 1792 err = module->add_vid(intf_modpriv->module_priv, vid); 1793 if (err != 0) { 1794 dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n", 1795 vid); 1796 break; 1797 } 1798 } 1799 } 1800 spin_unlock_irqrestore(&netcp->lock, flags); 1801 1802 return err; 1803 } 1804 1805 static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) 1806 { 1807 struct netcp_intf *netcp = netdev_priv(ndev); 1808 struct netcp_intf_modpriv *intf_modpriv; 1809 struct netcp_module *module; 1810 unsigned long flags; 1811 int err = 0; 1812 1813 dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid); 1814 1815 spin_lock_irqsave(&netcp->lock, flags); 1816 for_each_module(netcp, intf_modpriv) { 1817 module = intf_modpriv->netcp_module; 1818 if (module->del_vid) { 1819 err = module->del_vid(intf_modpriv->module_priv, vid); 1820 if (err != 0) { 1821 dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n", 1822 vid); 1823 break; 1824 } 1825 } 1826 } 1827 spin_unlock_irqrestore(&netcp->lock, flags); 1828 return err; 1829 } 1830 1831 static u16 netcp_select_queue(struct net_device *dev, 
struct sk_buff *skb, 1832 void *accel_priv, 1833 select_queue_fallback_t fallback) 1834 { 1835 return 0; 1836 } 1837 1838 static int netcp_setup_tc(struct net_device *dev, u8 num_tc) 1839 { 1840 int i; 1841 1842 /* setup tc must be called under rtnl lock */ 1843 ASSERT_RTNL(); 1844 1845 /* Sanity-check the number of traffic classes requested */ 1846 if ((dev->real_num_tx_queues <= 1) || 1847 (dev->real_num_tx_queues < num_tc)) 1848 return -EINVAL; 1849 1850 /* Configure traffic class to queue mappings */ 1851 if (num_tc) { 1852 netdev_set_num_tc(dev, num_tc); 1853 for (i = 0; i < num_tc; i++) 1854 netdev_set_tc_queue(dev, i, 1, i); 1855 } else { 1856 netdev_reset_tc(dev); 1857 } 1858 1859 return 0; 1860 } 1861 1862 static const struct net_device_ops netcp_netdev_ops = { 1863 .ndo_open = netcp_ndo_open, 1864 .ndo_stop = netcp_ndo_stop, 1865 .ndo_start_xmit = netcp_ndo_start_xmit, 1866 .ndo_set_rx_mode = netcp_set_rx_mode, 1867 .ndo_do_ioctl = netcp_ndo_ioctl, 1868 .ndo_change_mtu = netcp_ndo_change_mtu, 1869 .ndo_set_mac_address = eth_mac_addr, 1870 .ndo_validate_addr = eth_validate_addr, 1871 .ndo_vlan_rx_add_vid = netcp_rx_add_vid, 1872 .ndo_vlan_rx_kill_vid = netcp_rx_kill_vid, 1873 .ndo_tx_timeout = netcp_ndo_tx_timeout, 1874 .ndo_select_queue = netcp_select_queue, 1875 .ndo_setup_tc = netcp_setup_tc, 1876 }; 1877 1878 static int netcp_create_interface(struct netcp_device *netcp_device, 1879 struct device_node *node_interface) 1880 { 1881 struct device *dev = netcp_device->device; 1882 struct device_node *node = dev->of_node; 1883 struct netcp_intf *netcp; 1884 struct net_device *ndev; 1885 resource_size_t size; 1886 struct resource res; 1887 void __iomem *efuse = NULL; 1888 u32 efuse_mac = 0; 1889 const void *mac_addr; 1890 u8 efuse_mac_addr[6]; 1891 u32 temp[2]; 1892 int ret = 0; 1893 1894 ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1); 1895 if (!ndev) { 1896 dev_err(dev, "Error allocating netdev\n"); 1897 return -ENOMEM; 1898 } 1899 1900 ndev->features |= NETIF_F_SG; 1901 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1902 ndev->hw_features = ndev->features; 1903 ndev->vlan_features |= NETIF_F_SG; 1904 1905 netcp = netdev_priv(ndev); 1906 spin_lock_init(&netcp->lock); 1907 INIT_LIST_HEAD(&netcp->module_head); 1908 INIT_LIST_HEAD(&netcp->txhook_list_head); 1909 INIT_LIST_HEAD(&netcp->rxhook_list_head); 1910 INIT_LIST_HEAD(&netcp->addr_list); 1911 netcp->netcp_device = netcp_device; 1912 netcp->dev = netcp_device->device; 1913 netcp->ndev = ndev; 1914 netcp->ndev_dev = &ndev->dev; 1915 netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG); 1916 netcp->tx_pause_threshold = MAX_SKB_FRAGS; 1917 netcp->tx_resume_threshold = netcp->tx_pause_threshold; 1918 netcp->node_interface = node_interface; 1919 1920 ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac); 1921 if (efuse_mac) { 1922 if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) { 1923 dev_err(dev, "could not find efuse-mac reg resource\n"); 1924 ret = -ENODEV; 1925 goto quit; 1926 } 1927 size = resource_size(&res); 1928 1929 if (!devm_request_mem_region(dev, res.start, size, 1930 dev_name(dev))) { 1931 dev_err(dev, "could not reserve resource\n"); 1932 ret = -ENOMEM; 1933 goto quit; 1934 } 1935 1936 efuse = devm_ioremap_nocache(dev, res.start, size); 1937 if (!efuse) { 1938 dev_err(dev, "could not map resource\n"); 1939 devm_release_mem_region(dev, res.start, size); 1940 ret = -ENOMEM; 1941 goto quit; 1942 } 1943 1944 emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac); 1945 if 
(is_valid_ether_addr(efuse_mac_addr)) 1946 ether_addr_copy(ndev->dev_addr, efuse_mac_addr); 1947 else 1948 random_ether_addr(ndev->dev_addr); 1949 1950 devm_iounmap(dev, efuse); 1951 devm_release_mem_region(dev, res.start, size); 1952 } else { 1953 mac_addr = of_get_mac_address(node_interface); 1954 if (mac_addr) 1955 ether_addr_copy(ndev->dev_addr, mac_addr); 1956 else 1957 random_ether_addr(ndev->dev_addr); 1958 } 1959 1960 ret = of_property_read_string(node_interface, "rx-channel", 1961 &netcp->dma_chan_name); 1962 if (ret < 0) { 1963 dev_err(dev, "missing \"rx-channel\" parameter\n"); 1964 ret = -ENODEV; 1965 goto quit; 1966 } 1967 1968 ret = of_property_read_u32(node_interface, "rx-queue", 1969 &netcp->rx_queue_id); 1970 if (ret < 0) { 1971 dev_warn(dev, "missing \"rx-queue\" parameter\n"); 1972 netcp->rx_queue_id = KNAV_QUEUE_QPEND; 1973 } 1974 1975 ret = of_property_read_u32_array(node_interface, "rx-queue-depth", 1976 netcp->rx_queue_depths, 1977 KNAV_DMA_FDQ_PER_CHAN); 1978 if (ret < 0) { 1979 dev_err(dev, "missing \"rx-queue-depth\" parameter\n"); 1980 netcp->rx_queue_depths[0] = 128; 1981 } 1982 1983 ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2); 1984 if (ret < 0) { 1985 dev_err(dev, "missing \"rx-pool\" parameter\n"); 1986 ret = -ENODEV; 1987 goto quit; 1988 } 1989 netcp->rx_pool_size = temp[0]; 1990 netcp->rx_pool_region_id = temp[1]; 1991 1992 ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2); 1993 if (ret < 0) { 1994 dev_err(dev, "missing \"tx-pool\" parameter\n"); 1995 ret = -ENODEV; 1996 goto quit; 1997 } 1998 netcp->tx_pool_size = temp[0]; 1999 netcp->tx_pool_region_id = temp[1]; 2000 2001 if (netcp->tx_pool_size < MAX_SKB_FRAGS) { 2002 dev_err(dev, "tx-pool size too small, must be atleast(%ld)\n", 2003 MAX_SKB_FRAGS); 2004 ret = -ENODEV; 2005 goto quit; 2006 } 2007 2008 ret = of_property_read_u32(node_interface, "tx-completion-queue", 2009 &netcp->tx_compl_qid); 2010 if (ret < 0) { 2011 dev_warn(dev, "missing \"tx-completion-queue\" parameter\n"); 2012 netcp->tx_compl_qid = KNAV_QUEUE_QPEND; 2013 } 2014 2015 /* NAPI register */ 2016 netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT); 2017 netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT); 2018 2019 /* Register the network device */ 2020 ndev->dev_id = 0; 2021 ndev->watchdog_timeo = NETCP_TX_TIMEOUT; 2022 ndev->netdev_ops = &netcp_netdev_ops; 2023 SET_NETDEV_DEV(ndev, dev); 2024 2025 list_add_tail(&netcp->interface_list, &netcp_device->interface_head); 2026 return 0; 2027 2028 quit: 2029 free_netdev(ndev); 2030 return ret; 2031 } 2032 2033 static void netcp_delete_interface(struct netcp_device *netcp_device, 2034 struct net_device *ndev) 2035 { 2036 struct netcp_intf_modpriv *intf_modpriv, *tmp; 2037 struct netcp_intf *netcp = netdev_priv(ndev); 2038 struct netcp_module *module; 2039 2040 dev_dbg(netcp_device->device, "Removing interface \"%s\"\n", 2041 ndev->name); 2042 2043 /* Notify each of the modules that the interface is going away */ 2044 list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head, 2045 intf_list) { 2046 module = intf_modpriv->netcp_module; 2047 dev_dbg(netcp_device->device, "Releasing module \"%s\"\n", 2048 module->name); 2049 if (module->release) 2050 module->release(intf_modpriv->module_priv); 2051 list_del(&intf_modpriv->intf_list); 2052 kfree(intf_modpriv); 2053 } 2054 WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n", 2055 ndev->name); 2056 2057 
	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	netif_napi_del(&netcp->rx_napi);
	free_netdev(ndev);
}

static int netcp_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device_node *child, *interfaces;
	struct netcp_device *netcp_device;
	struct device *dev = &pdev->dev;
	int ret;

	if (!node) {
		dev_err(dev, "could not find device info\n");
		return -ENODEV;
	}

	/* Allocate a new NETCP device instance */
	netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
	if (!netcp_device)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable NETCP power-domain\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Initialize the NETCP device instance */
	INIT_LIST_HEAD(&netcp_device->interface_head);
	INIT_LIST_HEAD(&netcp_device->modpriv_head);
	netcp_device->device = dev;
	platform_set_drvdata(pdev, netcp_device);

	/* create interfaces */
	interfaces = of_get_child_by_name(node, "netcp-interfaces");
	if (!interfaces) {
		dev_err(dev, "could not find netcp-interfaces node\n");
		ret = -ENODEV;
		goto probe_quit;
	}

	for_each_available_child_of_node(interfaces, child) {
		ret = netcp_create_interface(netcp_device, child);
		if (ret) {
			dev_err(dev, "could not create interface(%s)\n",
				child->name);
			goto probe_quit_interface;
		}
	}

	/* Add the device instance to the list */
	list_add_tail(&netcp_device->device_list, &netcp_devices);

	return 0;

probe_quit_interface:
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

probe_quit:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
		kfree(inst_modpriv);
	}

	/* now that all modules are removed, clean up the interfaces */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	WARN(!list_empty(&netcp_device->interface_head),
	     "%s interface list not empty!\n", pdev->name);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver netcp_driver = {
	.driver = {
		.name		= "netcp-1.0",
		.of_match_table	= of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
module_platform_driver(netcp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SoCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
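
/*
 * Illustrative device-tree fragment (not part of the driver): a minimal
 * sketch of the per-interface properties parsed by netcp_probe() and
 * netcp_create_interface() above.  Node names and numeric values here are
 * hypothetical examples only; the authoritative binding and board-specific
 * values belong in the DT binding documentation for "ti,netcp-1.0".
 *
 *	netcp: netcp@2000000 {
 *		compatible = "ti,netcp-1.0";
 *		...
 *		netcp-interfaces {
 *			interface-0 {
 *				rx-channel = "netrx0";
 *				rx-pool = <1024 12>;		// size, region id
 *				tx-pool = <1024 12>;		// size, region id
 *				rx-queue-depth = <128 128 0 0>;	// one per FDQ
 *				rx-queue = <8704>;
 *				tx-completion-queue = <8706>;
 *				efuse-mac = <1>;		// read MAC from e-fuse
 *			};
 *		};
 *	};
 */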