// SPDX-License-Identifier: GPL-2.0
/*
 * Keystone NetCP Core driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Murali Karicheri <m-karicheri2@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_dma.h>

#include "netcp.h"

#define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT	64
#define NETCP_TX_TIMEOUT	(5 * HZ)
#define NETCP_PACKET_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE	ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR	16

#define NETCP_EFUSE_REG_INDEX	0

#define NETCP_MOD_PROBE_SKIPPED	1
#define NETCP_MOD_PROBE_FAILED	2

#define NETCP_DEBUG (NETIF_MSG_HW	| NETIF_MSG_WOL		| \
		     NETIF_MSG_DRV	| NETIF_MSG_LINK	| \
		     NETIF_MSG_IFUP	| NETIF_MSG_INTR	| \
		     NETIF_MSG_PROBE	| NETIF_MSG_TIMER	| \
		     NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	| \
		     NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	| \
		     NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	| \
		     NETIF_MSG_RX_STATUS)

#define NETCP_EFUSE_ADDR_SWAP	2

#define knav_queue_get_id(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_ENABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_DISABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_get_count(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)

#define for_each_netcp_module(module)			\
	list_for_each_entry(module, &netcp_modules, module_list)

#define for_each_netcp_device_module(netcp_device, inst_modpriv) \
	list_for_each_entry(inst_modpriv, \
			    &((netcp_device)->modpriv_head), inst_list)

#define for_each_module(netcp, intf_modpriv)			\
	list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)

/* Module management structures */
struct netcp_device {
	struct list_head	device_list;
	struct list_head	interface_head;
	struct list_head	modpriv_head;
	struct device		*device;
};

struct netcp_inst_modpriv {
	struct netcp_device	*netcp_device;
	struct netcp_module	*netcp_module;
	struct list_head	inst_list;
	void			*module_priv;
};

struct netcp_intf_modpriv {
	struct netcp_intf	*netcp_priv;
	struct netcp_module	*netcp_module;
	struct list_head	intf_list;
	void			*module_priv;
};

struct netcp_tx_cb {
	void	*ts_context;
	void	(*txtstamp)(void *context, struct sk_buff *skb);
};

static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);

static int netcp_debug_level = -1;
module_param(netcp_debug_level, int, 0);
MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");

/* Helper functions - Get/Set */
static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
			 struct knav_dma_desc *desc)
{
	*buff_len = le32_to_cpu(desc->buff_len);
	*buff = le32_to_cpu(desc->buff);
	*ndesc = le32_to_cpu(desc->next_desc);
}
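/* Hardware descriptors are little-endian in memory, so every field the
 * DMA engine reads or writes goes through the cpu_to_le32()/le32_to_cpu()
 * helpers below. The sw_data[] words are the exception: the hardware
 * never touches them, so they are stored and fetched in CPU byte order.
 */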
static void get_desc_info(u32 *desc_info, u32 *pkt_info,
			  struct knav_dma_desc *desc)
{
	*desc_info = le32_to_cpu(desc->desc_info);
	*pkt_info = le32_to_cpu(desc->packet_info);
}

static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	return desc->sw_data[index];
}

/* use these macros to get sw data */
#define GET_SW_DATA0(desc) get_sw_data(0, desc)
#define GET_SW_DATA1(desc) get_sw_data(1, desc)
#define GET_SW_DATA2(desc) get_sw_data(2, desc)
#define GET_SW_DATA3(desc) get_sw_data(3, desc)

static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
			     struct knav_dma_desc *desc)
{
	*buff = le32_to_cpu(desc->orig_buff);
	*buff_len = le32_to_cpu(desc->orig_len);
}

static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		words[i] = le32_to_cpu(desc[i]);
}

static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
			 struct knav_dma_desc *desc)
{
	desc->buff_len = cpu_to_le32(buff_len);
	desc->buff = cpu_to_le32(buff);
	desc->next_desc = cpu_to_le32(ndesc);
}

static void set_desc_info(u32 desc_info, u32 pkt_info,
			  struct knav_dma_desc *desc)
{
	desc->desc_info = cpu_to_le32(desc_info);
	desc->packet_info = cpu_to_le32(pkt_info);
}

static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	desc->sw_data[index] = data;
}

/* use these macros to set sw data */
#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)

static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
			     struct knav_dma_desc *desc)
{
	desc->orig_buff = cpu_to_le32(buff);
	desc->orig_len = cpu_to_le32(buff_len);
}

static void set_words(u32 *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		desc[i] = cpu_to_le32(words[i]);
}

/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
	unsigned int addr0, addr1;

	addr1 = readl(efuse_mac + 4);
	addr0 = readl(efuse_mac);

	switch (swap) {
	case NETCP_EFUSE_ADDR_SWAP:
		addr0 = addr1;
		addr1 = readl(efuse_mac);
		break;
	default:
		break;
	}

	x[0] = (addr1 & 0x0000ff00) >> 8;
	x[1] = addr1 & 0x000000ff;
	x[2] = (addr0 & 0xff000000) >> 24;
	x[3] = (addr0 & 0x00ff0000) >> 16;
	x[4] = (addr0 & 0x0000ff00) >> 8;
	x[5] = addr0 & 0x000000ff;

	return 0;
}

/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
	int ret;

	ret = register_netdev(netcp->ndev);
	if (!ret)
		netcp->netdev_registered = true;
	return ret;
}
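/* Probe one module against one NetCP device: find the module's node under
 * "netcp-devices" in the device tree (matched by "label" or node name),
 * run the module's probe, and, once a primary module exists, attach the
 * module to each interface that references it and register any interfaces
 * not yet known to the networking core.
 */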
static int netcp_module_probe(struct netcp_device *netcp_device,
			      struct netcp_module *module)
{
	struct device *dev = netcp_device->device;
	struct device_node *devices, *interface, *node = dev->of_node;
	struct device_node *child;
	struct netcp_inst_modpriv *inst_modpriv;
	struct netcp_intf *netcp_intf;
	struct netcp_module *tmp;
	bool primary_module_registered = false;
	int ret;

	/* Find this module in the sub-tree for this device */
	devices = of_get_child_by_name(node, "netcp-devices");
	if (!devices) {
		dev_err(dev, "could not find netcp-devices node\n");
		return NETCP_MOD_PROBE_SKIPPED;
	}

	for_each_available_child_of_node(devices, child) {
		const char *name;
		char node_name[32];

		if (of_property_read_string(child, "label", &name) < 0) {
			snprintf(node_name, sizeof(node_name), "%pOFn", child);
			name = node_name;
		}
		if (!strcasecmp(module->name, name))
			break;
	}

	of_node_put(devices);
	/* If module not used for this device, skip it */
	if (!child) {
		dev_warn(dev, "module(%s) not used for device\n", module->name);
		return NETCP_MOD_PROBE_SKIPPED;
	}

	inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
	if (!inst_modpriv) {
		of_node_put(child);
		return -ENOMEM;
	}

	inst_modpriv->netcp_device = netcp_device;
	inst_modpriv->netcp_module = module;
	list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);

	ret = module->probe(netcp_device, dev, child,
			    &inst_modpriv->module_priv);
	of_node_put(child);
	if (ret) {
		dev_err(dev, "Probe of module(%s) failed with %d\n",
			module->name, ret);
		list_del(&inst_modpriv->inst_list);
		devm_kfree(dev, inst_modpriv);
		return NETCP_MOD_PROBE_FAILED;
	}

	/* Attach modules only if the primary module is probed */
	for_each_netcp_module(tmp) {
		if (tmp->primary)
			primary_module_registered = true;
	}

	if (!primary_module_registered)
		return 0;

	/* Attach module to interfaces */
	list_for_each_entry(netcp_intf, &netcp_device->interface_head,
			    interface_list) {
		struct netcp_intf_modpriv *intf_modpriv;

		intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
					    GFP_KERNEL);
		if (!intf_modpriv)
			return -ENOMEM;

		interface = of_parse_phandle(netcp_intf->node_interface,
					     module->name, 0);

		if (!interface) {
			devm_kfree(dev, intf_modpriv);
			continue;
		}

		intf_modpriv->netcp_priv = netcp_intf;
		intf_modpriv->netcp_module = module;
		list_add_tail(&intf_modpriv->intf_list,
			      &netcp_intf->module_head);

		ret = module->attach(inst_modpriv->module_priv,
				     netcp_intf->ndev, interface,
				     &intf_modpriv->module_priv);
		of_node_put(interface);
		if (ret) {
			dev_dbg(dev, "Attach of module %s declined with %d\n",
				module->name, ret);
			list_del(&intf_modpriv->intf_list);
			devm_kfree(dev, intf_modpriv);
			continue;
		}
	}

	/* Now register the interface with netdev */
	list_for_each_entry(netcp_intf,
			    &netcp_device->interface_head,
			    interface_list) {
		/* If interface not registered then register now */
		if (!netcp_intf->netdev_registered) {
			ret = netcp_register_interface(netcp_intf);
			if (ret)
				return -ENODEV;
		}
	}
	return 0;
}

int netcp_register_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *tmp;
	int ret;

	if (!module->name) {
		WARN(1, "error registering netcp module: no name\n");
		return -EINVAL;
	}

	if (!module->probe) {
		WARN(1, "error registering netcp module: no probe\n");
		return -EINVAL;
	}

	mutex_lock(&netcp_modules_lock);

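	/* Module names are unique (compared case-insensitively); reject
	 * duplicate registrations.
	 */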
	for_each_netcp_module(tmp) {
		if (!strcasecmp(tmp->name, module->name)) {
			mutex_unlock(&netcp_modules_lock);
			return -EEXIST;
		}
	}
	list_add_tail(&module->module_list, &netcp_modules);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			goto fail;
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

fail:
	mutex_unlock(&netcp_modules_lock);
	netcp_unregister_module(module);
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);

static void netcp_release_module(struct netcp_device *netcp_device,
				 struct netcp_module *module)
{
	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device *dev = netcp_device->device;

	/* Release the module from each interface */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

		list_for_each_entry_safe(intf_modpriv, intf_tmp,
					 &netcp_intf->module_head,
					 intf_list) {
			if (intf_modpriv->netcp_module == module) {
				module->release(intf_modpriv->module_priv);
				list_del(&intf_modpriv->intf_list);
				devm_kfree(dev, intf_modpriv);
				break;
			}
		}
	}

	/* Remove the module from each instance */
	list_for_each_entry_safe(inst_modpriv, inst_tmp,
				 &netcp_device->modpriv_head, inst_list) {
		if (inst_modpriv->netcp_module == module) {
			module->remove(netcp_device,
				       inst_modpriv->module_priv);
			list_del(&inst_modpriv->inst_list);
			devm_kfree(dev, inst_modpriv);
			break;
		}
	}
}

void netcp_unregister_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *module_tmp;

	mutex_lock(&netcp_modules_lock);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		netcp_release_module(netcp_device, module);
	}

	/* Remove the module from the module list */
	for_each_netcp_module(module_tmp) {
		if (module == module_tmp) {
			list_del(&module->module_list);
			break;
		}
	}

	mutex_unlock(&netcp_modules_lock);
}
EXPORT_SYMBOL_GPL(netcp_unregister_module);

void *netcp_module_get_intf_data(struct netcp_module *module,
				 struct netcp_intf *intf)
{
	struct netcp_intf_modpriv *intf_modpriv;

	list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
		if (intf_modpriv->netcp_module == module)
			return intf_modpriv->module_priv;
	return NULL;
}
EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);

/* Module TX and RX Hook management */
struct netcp_hook_list {
	struct list_head	 list;
	netcp_hook_rtn		*hook_rtn;
	void			*hook_data;
	int			 order;
};
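/* Tx and Rx hook lists are kept sorted by ascending "order": a new hook
 * is inserted in front of the first entry with a larger order value, so
 * lower-order hooks run first on every packet. Modules choose their
 * order to sequence themselves in the datapath.
 */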
int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn = hook_rtn;
	entry->hook_data = hook_data;
	entry->order = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_txhook);

int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_txhook);

int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn = hook_rtn;
	entry->hook_data = hook_data;
	entry->order = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_rxhook);

int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);

static void netcp_frag_free(bool is_frag, void *ptr)
{
	if (is_frag)
		skb_free_frag(ptr);
	else
		kfree(ptr);
}
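/* Walk an Rx descriptor chain (a primary descriptor plus linked fragment
 * descriptors) and return every attached buffer and descriptor to the Rx
 * pool. The buffer virtual pointers are recovered from the software-only
 * sw_data words of each descriptor.
 */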
static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc)
{
	struct knav_dma_desc *ndesc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len, dma_sz = sizeof(*ndesc);
	void *buf_ptr;
	u32 tmp;

	get_words(&dma_desc, 1, &desc->next_desc);

	while (dma_desc) {
		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			break;
		}
		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(ndesc);
		buf_len = (int)GET_SW_DATA1(desc);
		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buf_ptr);
		knav_pool_desc_put(netcp->rx_pool, desc);
	}
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	buf_ptr = (void *)GET_SW_DATA0(desc);
	buf_len = (int)GET_SW_DATA1(desc);

	if (buf_ptr)
		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
	knav_pool_desc_put(netcp->rx_pool, desc);
}

static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	unsigned int dma_sz;
	dma_addr_t dma;

	for (; ;) {
		dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
		if (!dma)
			break;

		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
				__func__);
			rx_stats->rx_errors++;
			continue;
		}
		netcp_free_rx_desc_chain(netcp, desc);
		rx_stats->rx_dropped++;
	}
}
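/* Pull one packet off the Rx completion queue: build an skb around the
 * primary buffer, attach any chained page fragments, trim the FCS when
 * the hardware has not already done so, run the Rx hooks in order, and
 * hand the skb to the stack. Returns non-zero once the queue is empty.
 */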
static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	unsigned int dma_sz, buf_len, org_buf_len;
	struct knav_dma_desc *desc, *ndesc;
	unsigned int pkt_sz = 0, accum_sz;
	struct netcp_hook_list *rx_hook;
	dma_addr_t dma_desc, dma_buff;
	struct netcp_packet p_info;
	struct sk_buff *skb;
	void *org_buf_ptr;
	u32 tmp;

	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
	if (!dma_desc)
		return -1;

	desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
	if (unlikely(!desc)) {
		dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
		return 0;
	}

	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	org_buf_ptr = (void *)GET_SW_DATA0(desc);
	org_buf_len = (int)GET_SW_DATA1(desc);

	if (unlikely(!org_buf_ptr)) {
		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
		goto free_desc;
	}

	pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
	accum_sz = buf_len;
	dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

	/* Build a new sk_buff for the primary buffer */
	skb = build_skb(org_buf_ptr, org_buf_len);
	if (unlikely(!skb)) {
		dev_err(netcp->ndev_dev, "build_skb() failed\n");
		goto free_desc;
	}

	/* update data, tail and len */
	skb_reserve(skb, NETCP_SOP_OFFSET);
	__skb_put(skb, buf_len);

	/* Fill in the page fragment list */
	while (dma_desc) {
		struct page *page;

		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			goto free_desc;
		}

		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		page = (struct page *)GET_SW_DATA0(ndesc);

		if (likely(dma_buff && buf_len && page)) {
			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		} else {
			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
				&dma_buff, buf_len, page);
			goto free_desc;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset_in_page(dma_buff), buf_len, PAGE_SIZE);
		accum_sz += buf_len;

		/* Free the descriptor */
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	/* check for packet len and warn */
	if (unlikely(pkt_sz != accum_sz))
		dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
			pkt_sz, accum_sz);

	/* Newer version of the Ethernet switch can trim the Ethernet FCS
	 * from the packet and is indicated in hw_cap. So trim it only for
	 * older h/w
	 */
	if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
		__pskb_trim(skb, skb->len - ETH_FCS_LEN);

	/* Call each of the RX hooks */
	p_info.skb = skb;
	skb->dev = netcp->ndev;
	p_info.rxtstamp_complete = false;
	get_desc_info(&tmp, &p_info.eflags, desc);
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) &
			 KNAV_DMA_DESC_EFLAGS_MASK);
	list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
		int ret;

		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
					&p_info);
		if (unlikely(ret)) {
			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
				rx_hook->order, ret);
			/* Free the primary descriptor */
			rx_stats->rx_dropped++;
			knav_pool_desc_put(netcp->rx_pool, desc);
			dev_kfree_skb(skb);
			return 0;
		}
	}
	/* Free the primary descriptor */
	knav_pool_desc_put(netcp->rx_pool, desc);

	u64_stats_update_begin(&rx_stats->syncp_rx);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	u64_stats_update_end(&rx_stats->syncp_rx);

	/* push skb up the stack */
	skb->protocol = eth_type_trans(skb, netcp->ndev);
	netif_receive_skb(skb);
	return 0;

free_desc:
	netcp_free_rx_desc_chain(netcp, desc);
	rx_stats->rx_errors++;
	return 0;
}

static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int i;

	for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
		;
	return i;
}
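/* FDQ 0 holds the primary buffers (skb-frag allocations mapped with
 * dma_map_single()); the remaining FDQs hold whole pages mapped with
 * dma_map_page(). Teardown must therefore unmap and free each kind
 * differently, as below.
 */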
/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;

	/* Allocate descriptor */
	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			continue;
		}

		get_org_pkt_info(&dma, &buf_len, desc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(desc);

		if (unlikely(!dma)) {
			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (unlikely(!buf_ptr)) {
			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (fdq == 0) {
			dma_unmap_single(netcp->dev, dma, buf_len,
					 DMA_FROM_DEVICE);
			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
		} else {
			dma_unmap_page(netcp->dev, dma, buf_len,
				       DMA_FROM_DEVICE);
			__free_page(buf_ptr);
		}

		knav_pool_desc_put(netcp->rx_pool, desc);
	}
}

static void netcp_rxpool_free(struct netcp_intf *netcp)
{
	int i;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
		netcp_free_rx_buf(netcp, i);

	if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
		dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
			netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

	knav_pool_destroy(netcp->rx_pool);
	netcp->rx_pool = NULL;
}
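/* Allocate and map one Rx buffer plus its descriptor and push the
 * descriptor onto the given free descriptor queue: FDQ 0 receives a
 * NETCP_PACKET_SIZE primary buffer, the other FDQs receive full pages
 * for the fragment list.
 */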
static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *hwdesc;
	unsigned int buf_len, dma_sz;
	u32 desc_info, pkt_info;
	struct page *page;
	dma_addr_t dma;
	void *bufptr;
	u32 sw_data[2];

	/* Allocate descriptor */
	hwdesc = knav_pool_desc_get(netcp->rx_pool);
	if (IS_ERR_OR_NULL(hwdesc)) {
		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
		return -ENOMEM;
	}

	if (likely(fdq == 0)) {
		unsigned int primary_buf_len;
		/* Allocate a primary receive queue entry */
		buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		bufptr = netdev_alloc_frag(primary_buf_len);
		sw_data[1] = primary_buf_len;

		if (unlikely(!bufptr)) {
			dev_warn_ratelimited(netcp->ndev_dev,
					     "Primary RX buffer alloc failed\n");
			goto fail;
		}
		dma = dma_map_single(netcp->dev, bufptr, buf_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(netcp->dev, dma)))
			goto fail;

		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)bufptr;
	} else {
		/* Allocate a secondary receive queue entry */
		page = alloc_page(GFP_ATOMIC | GFP_DMA);
		if (unlikely(!page)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
			goto fail;
		}
		buf_len = PAGE_SIZE;
		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)page;
		sw_data[1] = 0;
	}

	desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
	pkt_info = KNAV_DMA_DESC_HAS_EPIB;
	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
		    KNAV_DMA_DESC_RETQ_SHIFT;
	set_org_pkt_info(dma, buf_len, hwdesc);
	SET_SW_DATA0(sw_data[0], hwdesc);
	SET_SW_DATA1(sw_data[1], hwdesc);
	set_desc_info(desc_info, pkt_info, hwdesc);

	/* Push to FDQs */
	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
			   &dma_sz);
	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
	return 0;

fail:
	knav_pool_desc_put(netcp->rx_pool, hwdesc);
	return -ENOMEM;
}

/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
	int i, ret = 0;

	/* Calculate the FDQ deficit and refill */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
		fdq_deficit[i] = netcp->rx_queue_depths[i] -
				 knav_queue_get_count(netcp->rx_fdq[i]);

		while (fdq_deficit[i]-- && !ret)
			ret = netcp_allocate_rx_buf(netcp, i);
	} /* end for fdqs */
}

/* NAPI poll */
static int netcp_rx_poll(struct napi_struct *napi, int budget)
{
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						rx_napi);
	unsigned int packets;

	packets = netcp_process_rx_packets(netcp, budget);

	netcp_rxpool_refill(netcp);
	if (packets < budget) {
		napi_complete_done(&netcp->rx_napi, packets);
		knav_queue_enable_notify(netcp->rx_queue);
	}

	return packets;
}

static void netcp_rx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->rx_queue);
	napi_schedule(&netcp->rx_napi);
}

static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc,
				     unsigned int desc_sz)
{
	struct knav_dma_desc *ndesc = desc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len;

	while (ndesc) {
		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

		if (dma_buf && buf_len)
			dma_unmap_single(netcp->dev, dma_buf, buf_len,
					 DMA_TO_DEVICE);
		else
			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
				 &dma_buf, buf_len);

		knav_pool_desc_put(netcp->tx_pool, ndesc);
		ndesc = NULL;
		if (dma_desc) {
			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
						     desc_sz);
			if (!ndesc)
				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
		}
	}
}
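/* Reclaim completed Tx descriptors: recover the skb pointer stashed in
 * sw_data, unmap and free the descriptor chain, invoke any Tx timestamp
 * callback, and wake the subqueue once the pool refills beyond the
 * resume threshold.
 */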
static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					  unsigned int budget)
{
	struct netcp_stats *tx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	struct netcp_tx_cb *tx_cb;
	struct sk_buff *skb;
	unsigned int dma_sz;
	dma_addr_t dma;
	int pkts = 0;

	while (budget--) {
		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
		if (!dma)
			break;
		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		skb = (struct sk_buff *)GET_SW_DATA0(desc);
		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
		if (!skb) {
			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		tx_cb = (struct netcp_tx_cb *)skb->cb;
		if (tx_cb->txtstamp)
			tx_cb->txtstamp(tx_cb->ts_context, skb);

		if (netif_subqueue_stopped(netcp->ndev, skb) &&
		    netif_running(netcp->ndev) &&
		    (knav_pool_count(netcp->tx_pool) >
		     netcp->tx_resume_threshold)) {
			u16 subqueue = skb_get_queue_mapping(skb);

			netif_wake_subqueue(netcp->ndev, subqueue);
		}

		u64_stats_update_begin(&tx_stats->syncp_tx);
		tx_stats->tx_packets++;
		tx_stats->tx_bytes += skb->len;
		u64_stats_update_end(&tx_stats->syncp_tx);
		dev_kfree_skb(skb);
		pkts++;
	}
	return pkts;
}

static int netcp_tx_poll(struct napi_struct *napi, int budget)
{
	int packets;
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						tx_napi);

	packets = netcp_process_tx_compl_packets(netcp, budget);
	if (packets < budget) {
		napi_complete(&netcp->tx_napi);
		knav_queue_enable_notify(netcp->tx_compl_q);
	}

	return packets;
}

static void netcp_tx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_schedule(&netcp->tx_napi);
}
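/* Map an skb for transmission: the linear area fills the first
 * descriptor and each page fragment a chained one, linked through
 * 32-bit descriptor DMA addresses. Returns the head descriptor, or
 * NULL on mapping or allocation failure.
 */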
static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc, *ndesc, *pdesc;
	unsigned int pkt_len = skb_headlen(skb);
	struct device *dev = netcp->dev;
	dma_addr_t dma_addr;
	unsigned int dma_sz;
	int i;

	/* Map the linear buffer */
	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma_addr))) {
		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
		return NULL;
	}

	desc = knav_pool_desc_get(netcp->tx_pool);
	if (IS_ERR_OR_NULL(desc)) {
		dev_err(netcp->ndev_dev, "out of TX desc\n");
		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
		return NULL;
	}

	set_pkt_info(dma_addr, pkt_len, 0, desc);
	if (skb_is_nonlinear(skb)) {
		prefetchw(skb_shinfo(skb));
	} else {
		desc->next_desc = 0;
		goto upd_pkt_len;
	}

	pdesc = desc;

	/* Handle the case where skb is fragmented in pages */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		u32 page_offset = skb_frag_off(frag);
		u32 buf_len = skb_frag_size(frag);
		dma_addr_t desc_dma;
		u32 desc_dma_32;

		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
					DMA_TO_DEVICE);
		if (unlikely(!dma_addr)) {
			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
			goto free_descs;
		}

		ndesc = knav_pool_desc_get(netcp->tx_pool);
		if (IS_ERR_OR_NULL(ndesc)) {
			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
			goto free_descs;
		}

		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
		set_pkt_info(dma_addr, buf_len, 0, ndesc);
		desc_dma_32 = (u32)desc_dma;
		set_words(&desc_dma_32, 1, &pdesc->next_desc);
		pkt_len += buf_len;
		if (pdesc != desc)
			knav_pool_desc_map(netcp->tx_pool, pdesc,
					   sizeof(*pdesc), &desc_dma, &dma_sz);
		pdesc = ndesc;
	}
	if (pdesc != desc)
		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
				   &dma_addr, &dma_sz);

	/* frag list based linkage is not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}
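/* Finish and queue a mapped Tx descriptor: run the Tx hooks (one of
 * which must claim the packet by setting p_info.tx_pipe), fill in the
 * psdata/epib words and the completion return-queue field, then push
 * the descriptor onto the selected pipe's DMA queue.
 */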
static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	struct netcp_tx_cb *tx_cb;
	unsigned int dma_sz;
	dma_addr_t dma;
	u32 tmp = 0;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp = NULL;
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	tx_cb = (struct netcp_tx_cb *)skb->cb;
	tx_cb->ts_context = p_info.ts_context;
	tx_cb->txtstamp = p_info.txtstamp;

	/* update descriptor */
	if (p_info.psdata_len) {
		/* psdata points to both native-endian and device-endian data */
		__le32 *psdata = (void __force *)p_info.psdata;

		set_words((u32 *)psdata +
			  (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len),
			  p_info.psdata_len, psdata);
		tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	tmp |= KNAV_DMA_DESC_HAS_EPIB |
		((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
		 KNAV_DMA_DESC_RETQ_SHIFT);

	if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
		tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
			KNAV_DMA_DESC_PSFLAG_SHIFT);
	}

	set_words(&tmp, 1, &desc->packet_info);
	/* warning!!!! We are saving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	SET_SW_DATA0((u32)skb, desc);

	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
		tmp = tx_pipe->switch_to_port;
		set_words(&tmp, 1, &desc->tag_info);
	}

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
	return ret;
}

/* Submit the packet */
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_stats *tx_stats = &netcp->stats;
	int subqueue = skb_get_queue_mapping(skb);
	struct knav_dma_desc *desc;
	int desc_count, ret = 0;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
		if (ret < 0) {
			/* If we get here, the skb has already been dropped */
			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
				 ret);
			tx_stats->tx_dropped++;
			return ret;
		}
		skb->len = NETCP_MIN_PACKET_SIZE;
	}

	desc = netcp_tx_map_skb(skb, netcp);
	if (unlikely(!desc)) {
		netif_stop_subqueue(ndev, subqueue);
		ret = -ENOBUFS;
		goto drop;
	}

	ret = netcp_tx_submit_skb(netcp, skb, desc);
	if (ret)
		goto drop;

	/* Check Tx pool count & stop subqueue if needed */
	desc_count = knav_pool_count(netcp->tx_pool);
	if (desc_count < netcp->tx_pause_threshold) {
		dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
		netif_stop_subqueue(ndev, subqueue);
	}
	return NETDEV_TX_OK;

drop:
	tx_stats->tx_dropped++;
	if (desc)
		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	dev_kfree_skb(skb);
	return ret;
}

int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
{
	if (tx_pipe->dma_channel) {
		knav_dma_close_channel(tx_pipe->dma_channel);
		tx_pipe->dma_channel = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_close);
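/* Open the DMA channel and the hardware queue backing a Tx pipe; pairs
 * with netcp_txpipe_close() and expects netcp_txpipe_init() to have
 * recorded the channel name and queue id beforehand.
 */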
int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
{
	struct device *dev = tx_pipe->netcp_device->device;
	struct knav_dma_cfg config;
	int ret = 0;
	u8 name[16];

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.u.tx.filt_einfo = false;
	config.u.tx.filt_pswords = false;
	config.u.tx.priority = DMA_PRIO_MED_L;

	tx_pipe->dma_channel = knav_dma_open_channel(dev,
						     tx_pipe->dma_chan_name,
						     &config);
	if (IS_ERR(tx_pipe->dma_channel)) {
		dev_err(dev, "failed opening tx chan(%s)\n",
			tx_pipe->dma_chan_name);
		ret = PTR_ERR(tx_pipe->dma_channel);
		goto err;
	}

	snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
	tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
					     KNAV_QUEUE_SHARED);
	if (IS_ERR(tx_pipe->dma_queue)) {
		dev_err(dev, "Could not open DMA queue for channel \"%s\": %pe\n",
			name, tx_pipe->dma_queue);
		ret = PTR_ERR(tx_pipe->dma_queue);
		goto err;
	}

	dev_dbg(dev, "opened tx pipe %s\n", name);
	return 0;

err:
	if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
		knav_dma_close_channel(tx_pipe->dma_channel);
	tx_pipe->dma_channel = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_open);

int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
		      struct netcp_device *netcp_device,
		      const char *dma_chan_name, unsigned int dma_queue_id)
{
	memset(tx_pipe, 0, sizeof(*tx_pipe));
	tx_pipe->netcp_device = netcp_device;
	tx_pipe->dma_chan_name = dma_chan_name;
	tx_pipe->dma_queue_id = dma_queue_id;
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_init);

static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
					  const u8 *addr,
					  enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node) {
		if (naddr->type != type)
			continue;
		if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
			continue;
		return naddr;
	}

	return NULL;
}

static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
					 const u8 *addr,
					 enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
	if (!naddr)
		return NULL;

	naddr->type = type;
	naddr->flags = 0;
	naddr->netcp = netcp;
	if (addr)
		ether_addr_copy(naddr->addr, addr);
	else
		eth_zero_addr(naddr->addr);
	list_add_tail(&naddr->node, &netcp->addr_list);

	return naddr;
}

static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
	list_del(&naddr->node);
	devm_kfree(netcp->dev, naddr);
}

static void netcp_addr_clear_mark(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node)
		naddr->flags = 0;
}

static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
				enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = netcp_addr_find(netcp, addr, type);
	if (naddr) {
		naddr->flags |= ADDR_VALID;
		return;
	}

	naddr = netcp_addr_add(netcp, addr, type);
	if (!WARN_ON(!naddr))
		naddr->flags |= ADDR_NEW;
}

static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (naddr->flags & (ADDR_VALID | ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
			naddr->addr, naddr->type);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->del_addr)
				continue;
			error = module->del_addr(priv->module_priv,
						 naddr);
			WARN_ON(error);
		}
		netcp_addr_del(netcp, naddr);
	}
}
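/* Second half of the mark-and-sweep address sync: push every entry still
 * flagged ADDR_NEW down to the modules. netcp_set_rx_mode() drives the
 * full cycle: clear all marks, re-mark what the stack still wants,
 * delete the unmarked leftovers, then add the new entries.
 */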
static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (!(naddr->flags & ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
			naddr->addr, naddr->type);

		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->add_addr)
				continue;
			error = module->add_addr(priv->module_priv, naddr);
			WARN_ON(error);
		}
	}
}

static int netcp_set_promiscuous(struct netcp_intf *netcp, bool promisc)
{
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	for_each_module(netcp, priv) {
		module = priv->netcp_module;
		if (!module->set_rx_mode)
			continue;

		error = module->set_rx_mode(priv->module_priv, promisc);
		if (error)
			return error;
	}
	return 0;
}

static void netcp_set_rx_mode(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netdev_hw_addr *ndev_addr;
	bool promisc;

	promisc = (ndev->flags & IFF_PROMISC ||
		   ndev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);

	spin_lock(&netcp->lock);
	/* first clear all marks */
	netcp_addr_clear_mark(netcp);

	/* next add new entries, mark existing ones */
	netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
	for_each_dev_addr(ndev, ndev_addr)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
	netdev_for_each_uc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
	netdev_for_each_mc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);

	if (promisc)
		netcp_addr_add_mark(netcp, NULL, ADDR_ANY);

	/* finally sweep and callout into modules */
	netcp_addr_sweep_del(netcp);
	netcp_addr_sweep_add(netcp);
	netcp_set_promiscuous(netcp, promisc);
	spin_unlock(&netcp->lock);
}

static void netcp_free_navigator_resources(struct netcp_intf *netcp)
{
	int i;

	if (netcp->rx_channel) {
		knav_dma_close_channel(netcp->rx_channel);
		netcp->rx_channel = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->rx_pool))
		netcp_rxpool_free(netcp);

	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
		knav_queue_close(netcp->rx_queue);
		netcp->rx_queue = NULL;
	}

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); ++i) {
		knav_queue_close(netcp->rx_fdq[i]);
		netcp->rx_fdq[i] = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		knav_queue_close(netcp->tx_compl_q);
		netcp->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
		knav_pool_destroy(netcp->tx_pool);
		netcp->tx_pool = NULL;
	}
}
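/* Bring up the Queue Manager / Packet DMA plumbing for one interface:
 * Rx and Tx descriptor pools, Tx and Rx completion queues (with
 * notifier callbacks that kick NAPI), the Rx free descriptor queues,
 * and finally the Rx DMA channel that ties the FDQs to the Rx
 * completion queue.
 */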
static int netcp_setup_navigator_resources(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct knav_queue_notify_config notify_cfg;
	struct knav_dma_cfg config;
	u32 last_fdq = 0;
	u8 name[16];
	int ret;
	int i;

	/* Create Rx/Tx descriptor pools */
	snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
	netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
					  netcp->rx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->rx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
		ret = PTR_ERR(netcp->rx_pool);
		goto fail;
	}

	snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
	netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
					  netcp->tx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->tx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
		ret = PTR_ERR(netcp->tx_pool);
		goto fail;
	}

	/* open Tx completion queue */
	snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
	netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
	if (IS_ERR(netcp->tx_compl_q)) {
		ret = PTR_ERR(netcp->tx_compl_q);
		goto fail;
	}
	netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);

	/* Set notification for Tx completion */
	notify_cfg.fn = netcp_tx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->tx_compl_q,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->tx_compl_q);

	/* open Rx completion queue */
	snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
	netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
	if (IS_ERR(netcp->rx_queue)) {
		ret = PTR_ERR(netcp->rx_queue);
		goto fail;
	}
	netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);

	/* Set notification for Rx completion */
	notify_cfg.fn = netcp_rx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->rx_queue,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->rx_queue);

	/* open Rx FDQs */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
	     ++i) {
		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
		if (IS_ERR(netcp->rx_fdq[i])) {
			ret = PTR_ERR(netcp->rx_fdq[i]);
			goto fail;
		}
	}

	memset(&config, 0, sizeof(config));
	config.direction = DMA_DEV_TO_MEM;
	config.u.rx.einfo_present = true;
	config.u.rx.psinfo_present = true;
	config.u.rx.err_mode = DMA_DROP;
	config.u.rx.desc_type = DMA_DESC_HOST;
	config.u.rx.psinfo_at_sop = false;
	config.u.rx.sop_offset = NETCP_SOP_OFFSET;
	config.u.rx.dst_q = netcp->rx_queue_id;
	config.u.rx.thresh = DMA_THRESH_NONE;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
		if (netcp->rx_fdq[i])
			last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
		config.u.rx.fdq[i] = last_fdq;
	}

	netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
						  netcp->dma_chan_name,
						  &config);
	if (IS_ERR(netcp->rx_channel)) {
		dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
			netcp->dma_chan_name);
		ret = PTR_ERR(netcp->rx_channel);
		goto fail;
	}

	dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
	return 0;

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Open the device */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}

	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netcp_rxpool_refill(netcp);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Close the device */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}
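/* Delegate ioctls to the attached modules: the first module returning a
 * real error ends the walk, while -EOPNOTSUPP from every module means
 * nobody handled the request.
 */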
static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		if (err == 0)
			ret = err;
	}

out:
	return (ret == 0) ? 0 : err;
}

static void netcp_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	unsigned int descs = knav_pool_count(netcp->tx_pool);

	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}

static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if ((module->add_vid) && (vid != 0)) {
			err = module->add_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);

	return err;
}

static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->del_vid) {
			err = module->del_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);
	return err;
}

static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 num_tc;
	int i;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = mqprio->num_tc;

	/* Sanity-check the number of traffic classes requested */
	if ((dev->real_num_tx_queues <= 1) ||
	    (dev->real_num_tx_queues < num_tc))
		return -EINVAL;

	/* Configure traffic class to queue mappings */
	if (num_tc) {
		netdev_set_num_tc(dev, num_tc);
		for (i = 0; i < num_tc; i++)
			netdev_set_tc_queue(dev, i, 1, i);
	} else {
		netdev_reset_tc(dev);
	}

	return 0;
}
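/* The 64-bit packet and byte counters are updated in NAPI context under
 * u64_stats sync points, so retry each fetch until a consistent snapshot
 * is read. The error and drop counters are plain 32-bit values.
 */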

static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open		= netcp_ndo_open,
	.ndo_stop		= netcp_ndo_stop,
	.ndo_start_xmit		= netcp_ndo_start_xmit,
	.ndo_set_rx_mode	= netcp_set_rx_mode,
	.ndo_eth_ioctl		= netcp_ndo_ioctl,
	.ndo_get_stats64	= netcp_get_stats,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
	.ndo_select_queue	= dev_pick_tx_zero,
	.ndo_setup_tc		= netcp_setup_tc,
};

static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	u8 efuse_mac_addr[6];
	u32 temp[2];
	int ret = 0;

	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |= NETIF_F_SG;

	/* MTU range: 68 - 9486 */
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = NETCP_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);
	u64_stats_init(&netcp->stats.syncp_rx);
	u64_stats_init(&netcp->stats.syncp_tx);
	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;

	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
		if (is_valid_ether_addr(efuse_mac_addr))
			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
		else
			eth_random_addr(ndev->dev_addr);

		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		ret = of_get_mac_address(node_interface, ndev->dev_addr);
		if (ret)
			eth_random_addr(ndev->dev_addr);
	}
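
	/*
	 * The reads below parse the per-interface child of the
	 * "netcp-interfaces" node. A representative fragment, with
	 * placeholder values (queue and pool numbers are board-specific):
	 *
	 *	interface-0 {
	 *		rx-channel = "netrx0";
	 *		rx-pool = <1024 12>;
	 *		tx-pool = <1024 12>;
	 *		rx-queue-depth = <128 128 0 0>;
	 *		rx-queue = <8704>;
	 *		tx-completion-queue = <8706>;
	 *	};
	 */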

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be at least (%ld)\n",
			MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
	netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);

	/* Register the network device */
	ndev->dev_id = 0;
	ndev->watchdog_timeo = NETCP_TX_TIMEOUT;
	ndev->netdev_ops = &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}

static void netcp_delete_interface(struct netcp_device *netcp_device,
				   struct net_device *ndev)
{
	struct netcp_intf_modpriv *intf_modpriv, *tmp;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_module *module;

	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
		ndev->name);

	/* Notify each of the modules that the interface is going away */
	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
				 intf_list) {
		module = intf_modpriv->netcp_module;
		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
			module->name);
		if (module->release)
			module->release(intf_modpriv->module_priv);
		list_del(&intf_modpriv->intf_list);
	}
	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
	     ndev->name);

	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	free_netdev(ndev);
}
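
/*
 * Interface teardown above walks each attached module's release()
 * callback; netcp_probe() below probes any modules that registered before
 * the device appeared. A module supplies only the callbacks it needs.
 * A minimal, hypothetical example, registered through
 * netcp_register_module() (the registration entry point this core
 * exports):
 *
 *	static struct netcp_module my_module = {
 *		.name	 = "my-module",
 *		.owner	 = THIS_MODULE,
 *		.probe	 = my_probe,
 *		.release = my_release,
 *	};
 *
 *	netcp_register_module(&my_module);
 */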

static int netcp_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device_node *child, *interfaces;
	struct netcp_device *netcp_device;
	struct device *dev = &pdev->dev;
	struct netcp_module *module;
	int ret;

	if (!knav_dma_device_ready() ||
	    !knav_qmss_device_ready())
		return -EPROBE_DEFER;

	if (!node) {
		dev_err(dev, "could not find device info\n");
		return -ENODEV;
	}

	/* Allocate a new NETCP device instance */
	netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
	if (!netcp_device)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable NETCP power-domain\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Initialize the NETCP device instance */
	INIT_LIST_HEAD(&netcp_device->interface_head);
	INIT_LIST_HEAD(&netcp_device->modpriv_head);
	netcp_device->device = dev;
	platform_set_drvdata(pdev, netcp_device);

	/* create interfaces */
	interfaces = of_get_child_by_name(node, "netcp-interfaces");
	if (!interfaces) {
		dev_err(dev, "could not find netcp-interfaces node\n");
		ret = -ENODEV;
		goto probe_quit;
	}

	for_each_available_child_of_node(interfaces, child) {
		ret = netcp_create_interface(netcp_device, child);
		if (ret) {
			dev_err(dev, "could not create interface(%pOFn)\n",
				child);
			goto probe_quit_interface;
		}
	}

	of_node_put(interfaces);

	/* Add the device instance to the list */
	list_add_tail(&netcp_device->device_list, &netcp_devices);

	/* Probe & attach any modules already registered */
	mutex_lock(&netcp_modules_lock);
	for_each_netcp_module(module) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			dev_err(dev, "module(%s) probe failed\n", module->name);
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

probe_quit_interface:
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	of_node_put(interfaces);

probe_quit:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
	}

	/* now that all modules are removed, clean up the interfaces */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	WARN(!list_empty(&netcp_device->interface_head),
	     "%s interface list not empty!\n", pdev->name);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
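
/*
 * MODULE_DEVICE_TABLE() exports the compatible strings above as module
 * aliases, so userspace (udev/modprobe) can autoload this driver when a
 * matching "ti,netcp-1.0" node is present in the device tree.
 */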
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver netcp_driver = {
	.driver = {
		.name		= "netcp-1.0",
		.of_match_table	= of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
module_platform_driver(netcp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SoCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");