1 /* Copyright 2008 - 2016 Freescale Semiconductor Inc. 2 * 3 * Redistribution and use in source and binary forms, with or without 4 * modification, are permitted provided that the following conditions are met: 5 * * Redistributions of source code must retain the above copyright 6 * notice, this list of conditions and the following disclaimer. 7 * * Redistributions in binary form must reproduce the above copyright 8 * notice, this list of conditions and the following disclaimer in the 9 * documentation and/or other materials provided with the distribution. 10 * * Neither the name of Freescale Semiconductor nor the 11 * names of its contributors may be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * ALTERNATIVELY, this software may be distributed under the terms of the 15 * GNU General Public License ("GPL") as published by the Free Software 16 * Foundation, either version 2 of that License or (at your option) any 17 * later version. 18 * 19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32 33 #include <linux/init.h> 34 #include <linux/module.h> 35 #include <linux/of_platform.h> 36 #include <linux/of_mdio.h> 37 #include <linux/of_net.h> 38 #include <linux/io.h> 39 #include <linux/if_arp.h> 40 #include <linux/if_vlan.h> 41 #include <linux/icmp.h> 42 #include <linux/ip.h> 43 #include <linux/ipv6.h> 44 #include <linux/udp.h> 45 #include <linux/tcp.h> 46 #include <linux/net.h> 47 #include <linux/skbuff.h> 48 #include <linux/etherdevice.h> 49 #include <linux/if_ether.h> 50 #include <linux/highmem.h> 51 #include <linux/percpu.h> 52 #include <linux/dma-mapping.h> 53 #include <linux/sort.h> 54 #include <linux/phy_fixed.h> 55 #include <soc/fsl/bman.h> 56 #include <soc/fsl/qman.h> 57 #include "fman.h" 58 #include "fman_port.h" 59 #include "mac.h" 60 #include "dpaa_eth.h" 61 62 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files 63 * using trace events only need to #include <trace/events/sched.h> 64 */ 65 #define CREATE_TRACE_POINTS 66 #include "dpaa_eth_trace.h" 67 68 static int debug = -1; 69 module_param(debug, int, 0444); 70 MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)"); 71 72 static u16 tx_timeout = 1000; 73 module_param(tx_timeout, ushort, 0444); 74 MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); 75 76 #define FM_FD_STAT_RX_ERRORS \ 77 (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \ 78 FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \ 79 FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \ 80 FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \ 81 FM_FD_ERR_PRS_HDR_ERR) 82 83 #define FM_FD_STAT_TX_ERRORS \ 84 (FM_FD_ERR_UNSUPPORTED_FORMAT | \ 85 FM_FD_ERR_LENGTH | FM_FD_ERR_DMA) 86 87 #define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ 88 NETIF_MSG_LINK | NETIF_MSG_IFUP | \ 89 NETIF_MSG_IFDOWN) 90 91 #define DPAA_INGRESS_CS_THRESHOLD 0x10000000 92 /* Ingress congestion threshold on FMan ports 93 * The size in bytes of the ingress tail-drop threshold on FMan ports. 94 * Traffic piling up above this value will be rejected by QMan and discarded 95 * by FMan. 96 */ 97 98 /* Size in bytes of the FQ taildrop threshold */ 99 #define DPAA_FQ_TD 0x200000 100 101 #define DPAA_CS_THRESHOLD_1G 0x06000000 102 /* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000 103 * The size in bytes of the egress Congestion State notification threshold on 104 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a 105 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"), 106 * and the larger the frame size, the more acute the problem. 107 * So we have to find a balance between these factors: 108 * - avoiding the device staying congested for a prolonged time (risking 109 * the netdev watchdog to fire - see also the tx_timeout module param); 110 * - affecting performance of protocols such as TCP, which otherwise 111 * behave well under the congestion notification mechanism; 112 * - preventing the Tx cores from tightly-looping (as if the congestion 113 * threshold was too low to be effective); 114 * - running out of memory if the CS threshold is set too high. 115 */ 116 117 #define DPAA_CS_THRESHOLD_10G 0x10000000 118 /* The size in bytes of the egress Congestion State notification threshold on 119 * 10G ports, range 0x1000 .. 0x10000000 120 */ 121 122 /* Largest value that the FQD's OAL field can hold */ 123 #define FSL_QMAN_MAX_OAL 127 124 125 /* Default alignment for start of data in an Rx FD */ 126 #define DPAA_FD_DATA_ALIGNMENT 16 127 128 /* The DPAA requires 256 bytes reserved and mapped for the SGT */ 129 #define DPAA_SGT_SIZE 256 130 131 /* Values for the L3R field of the FM Parse Results 132 */ 133 /* L3 Type field: First IP Present IPv4 */ 134 #define FM_L3_PARSE_RESULT_IPV4 0x8000 135 /* L3 Type field: First IP Present IPv6 */ 136 #define FM_L3_PARSE_RESULT_IPV6 0x4000 137 /* Values for the L4R field of the FM Parse Results */ 138 /* L4 Type field: UDP */ 139 #define FM_L4_PARSE_RESULT_UDP 0x40 140 /* L4 Type field: TCP */ 141 #define FM_L4_PARSE_RESULT_TCP 0x20 142 143 /* FD status field indicating whether the FM Parser has attempted to validate 144 * the L4 csum of the frame. 145 * Note that having this bit set doesn't necessarily imply that the checksum 146 * is valid. One would have to check the parse results to find that out. 147 */ 148 #define FM_FD_STAT_L4CV 0x00000004 149 150 #define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */ 151 #define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */ 152 153 #define FSL_DPAA_BPID_INV 0xff 154 #define FSL_DPAA_ETH_MAX_BUF_COUNT 128 155 #define FSL_DPAA_ETH_REFILL_THRESHOLD 80 156 157 #define DPAA_TX_PRIV_DATA_SIZE 16 158 #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result) 159 #define DPAA_TIME_STAMP_SIZE 8 160 #define DPAA_HASH_RESULTS_SIZE 8 161 #define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \ 162 dpaa_rx_extra_headroom) 163 164 #define DPAA_ETH_PCD_RXQ_NUM 128 165 166 #define DPAA_ENQUEUE_RETRIES 100000 167 168 enum port_type {RX, TX}; 169 170 struct fm_port_fqs { 171 struct dpaa_fq *tx_defq; 172 struct dpaa_fq *tx_errq; 173 struct dpaa_fq *rx_defq; 174 struct dpaa_fq *rx_errq; 175 struct dpaa_fq *rx_pcdq; 176 }; 177 178 /* All the dpa bps in use at any moment */ 179 static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS]; 180 181 #define DPAA_BP_RAW_SIZE 4096 182 183 #define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size) 184 185 static int dpaa_max_frm; 186 187 static int dpaa_rx_extra_headroom; 188 189 #define dpaa_get_max_mtu() \ 190 (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN)) 191 192 static int dpaa_netdev_init(struct net_device *net_dev, 193 const struct net_device_ops *dpaa_ops, 194 u16 tx_timeout) 195 { 196 struct dpaa_priv *priv = netdev_priv(net_dev); 197 struct device *dev = net_dev->dev.parent; 198 struct dpaa_percpu_priv *percpu_priv; 199 const u8 *mac_addr; 200 int i, err; 201 202 /* Although we access another CPU's private data here 203 * we do it at initialization so it is safe 204 */ 205 for_each_possible_cpu(i) { 206 percpu_priv = per_cpu_ptr(priv->percpu_priv, i); 207 percpu_priv->net_dev = net_dev; 208 } 209 210 net_dev->netdev_ops = dpaa_ops; 211 mac_addr = priv->mac_dev->addr; 212 213 net_dev->mem_start = priv->mac_dev->res->start; 214 net_dev->mem_end = priv->mac_dev->res->end; 215 216 net_dev->min_mtu = ETH_MIN_MTU; 217 net_dev->max_mtu = dpaa_get_max_mtu(); 218 219 net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 220 NETIF_F_LLTX | NETIF_F_RXHASH); 221 222 net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA; 223 /* The kernels enables GSO automatically, if we declare NETIF_F_SG. 224 * For conformity, we'll still declare GSO explicitly. 225 */ 226 net_dev->features |= NETIF_F_GSO; 227 net_dev->features |= NETIF_F_RXCSUM; 228 229 net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 230 /* we do not want shared skbs on TX */ 231 net_dev->priv_flags &= ~IFF_TX_SKB_SHARING; 232 233 net_dev->features |= net_dev->hw_features; 234 net_dev->vlan_features = net_dev->features; 235 236 memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); 237 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); 238 239 net_dev->ethtool_ops = &dpaa_ethtool_ops; 240 241 net_dev->needed_headroom = priv->tx_headroom; 242 net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout); 243 244 /* start without the RUNNING flag, phylib controls it later */ 245 netif_carrier_off(net_dev); 246 247 err = register_netdev(net_dev); 248 if (err < 0) { 249 dev_err(dev, "register_netdev() = %d\n", err); 250 return err; 251 } 252 253 return 0; 254 } 255 256 static int dpaa_stop(struct net_device *net_dev) 257 { 258 struct mac_device *mac_dev; 259 struct dpaa_priv *priv; 260 int i, err, error; 261 262 priv = netdev_priv(net_dev); 263 mac_dev = priv->mac_dev; 264 265 netif_tx_stop_all_queues(net_dev); 266 /* Allow the Fman (Tx) port to process in-flight frames before we 267 * try switching it off. 268 */ 269 msleep(200); 270 271 err = mac_dev->stop(mac_dev); 272 if (err < 0) 273 netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n", 274 err); 275 276 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { 277 error = fman_port_disable(mac_dev->port[i]); 278 if (error) 279 err = error; 280 } 281 282 if (net_dev->phydev) 283 phy_disconnect(net_dev->phydev); 284 net_dev->phydev = NULL; 285 286 msleep(200); 287 288 return err; 289 } 290 291 static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue) 292 { 293 struct dpaa_percpu_priv *percpu_priv; 294 const struct dpaa_priv *priv; 295 296 priv = netdev_priv(net_dev); 297 percpu_priv = this_cpu_ptr(priv->percpu_priv); 298 299 netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n", 300 jiffies_to_msecs(jiffies - dev_trans_start(net_dev))); 301 302 percpu_priv->stats.tx_errors++; 303 } 304 305 /* Calculates the statistics for the given device by adding the statistics 306 * collected by each CPU. 307 */ 308 static void dpaa_get_stats64(struct net_device *net_dev, 309 struct rtnl_link_stats64 *s) 310 { 311 int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64); 312 struct dpaa_priv *priv = netdev_priv(net_dev); 313 struct dpaa_percpu_priv *percpu_priv; 314 u64 *netstats = (u64 *)s; 315 u64 *cpustats; 316 int i, j; 317 318 for_each_possible_cpu(i) { 319 percpu_priv = per_cpu_ptr(priv->percpu_priv, i); 320 321 cpustats = (u64 *)&percpu_priv->stats; 322 323 /* add stats from all CPUs */ 324 for (j = 0; j < numstats; j++) 325 netstats[j] += cpustats[j]; 326 } 327 } 328 329 static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, 330 void *type_data) 331 { 332 struct dpaa_priv *priv = netdev_priv(net_dev); 333 struct tc_mqprio_qopt *mqprio = type_data; 334 u8 num_tc; 335 int i; 336 337 if (type != TC_SETUP_QDISC_MQPRIO) 338 return -EOPNOTSUPP; 339 340 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 341 num_tc = mqprio->num_tc; 342 343 if (num_tc == priv->num_tc) 344 return 0; 345 346 if (!num_tc) { 347 netdev_reset_tc(net_dev); 348 goto out; 349 } 350 351 if (num_tc > DPAA_TC_NUM) { 352 netdev_err(net_dev, "Too many traffic classes: max %d supported.\n", 353 DPAA_TC_NUM); 354 return -EINVAL; 355 } 356 357 netdev_set_num_tc(net_dev, num_tc); 358 359 for (i = 0; i < num_tc; i++) 360 netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM, 361 i * DPAA_TC_TXQ_NUM); 362 363 out: 364 priv->num_tc = num_tc ? : 1; 365 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); 366 return 0; 367 } 368 369 static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev) 370 { 371 struct dpaa_eth_data *eth_data; 372 struct device *dpaa_dev; 373 struct mac_device *mac_dev; 374 375 dpaa_dev = &pdev->dev; 376 eth_data = dpaa_dev->platform_data; 377 if (!eth_data) { 378 dev_err(dpaa_dev, "eth_data missing\n"); 379 return ERR_PTR(-ENODEV); 380 } 381 mac_dev = eth_data->mac_dev; 382 if (!mac_dev) { 383 dev_err(dpaa_dev, "mac_dev missing\n"); 384 return ERR_PTR(-EINVAL); 385 } 386 387 return mac_dev; 388 } 389 390 static int dpaa_set_mac_address(struct net_device *net_dev, void *addr) 391 { 392 const struct dpaa_priv *priv; 393 struct mac_device *mac_dev; 394 struct sockaddr old_addr; 395 int err; 396 397 priv = netdev_priv(net_dev); 398 399 memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN); 400 401 err = eth_mac_addr(net_dev, addr); 402 if (err < 0) { 403 netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err); 404 return err; 405 } 406 407 mac_dev = priv->mac_dev; 408 409 err = mac_dev->change_addr(mac_dev->fman_mac, 410 (enet_addr_t *)net_dev->dev_addr); 411 if (err < 0) { 412 netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n", 413 err); 414 /* reverting to previous address */ 415 eth_mac_addr(net_dev, &old_addr); 416 417 return err; 418 } 419 420 return 0; 421 } 422 423 static void dpaa_set_rx_mode(struct net_device *net_dev) 424 { 425 const struct dpaa_priv *priv; 426 int err; 427 428 priv = netdev_priv(net_dev); 429 430 if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) { 431 priv->mac_dev->promisc = !priv->mac_dev->promisc; 432 err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac, 433 priv->mac_dev->promisc); 434 if (err < 0) 435 netif_err(priv, drv, net_dev, 436 "mac_dev->set_promisc() = %d\n", 437 err); 438 } 439 440 if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) { 441 priv->mac_dev->allmulti = !priv->mac_dev->allmulti; 442 err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac, 443 priv->mac_dev->allmulti); 444 if (err < 0) 445 netif_err(priv, drv, net_dev, 446 "mac_dev->set_allmulti() = %d\n", 447 err); 448 } 449 450 err = priv->mac_dev->set_multi(net_dev, priv->mac_dev); 451 if (err < 0) 452 netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n", 453 err); 454 } 455 456 static struct dpaa_bp *dpaa_bpid2pool(int bpid) 457 { 458 if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS)) 459 return NULL; 460 461 return dpaa_bp_array[bpid]; 462 } 463 464 /* checks if this bpool is already allocated */ 465 static bool dpaa_bpid2pool_use(int bpid) 466 { 467 if (dpaa_bpid2pool(bpid)) { 468 refcount_inc(&dpaa_bp_array[bpid]->refs); 469 return true; 470 } 471 472 return false; 473 } 474 475 /* called only once per bpid by dpaa_bp_alloc_pool() */ 476 static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp) 477 { 478 dpaa_bp_array[bpid] = dpaa_bp; 479 refcount_set(&dpaa_bp->refs, 1); 480 } 481 482 static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp) 483 { 484 int err; 485 486 if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) { 487 pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n", 488 __func__); 489 return -EINVAL; 490 } 491 492 /* If the pool is already specified, we only create one per bpid */ 493 if (dpaa_bp->bpid != FSL_DPAA_BPID_INV && 494 dpaa_bpid2pool_use(dpaa_bp->bpid)) 495 return 0; 496 497 if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) { 498 dpaa_bp->pool = bman_new_pool(); 499 if (!dpaa_bp->pool) { 500 pr_err("%s: bman_new_pool() failed\n", 501 __func__); 502 return -ENODEV; 503 } 504 505 dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool); 506 } 507 508 if (dpaa_bp->seed_cb) { 509 err = dpaa_bp->seed_cb(dpaa_bp); 510 if (err) 511 goto pool_seed_failed; 512 } 513 514 dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp); 515 516 return 0; 517 518 pool_seed_failed: 519 pr_err("%s: pool seeding failed\n", __func__); 520 bman_free_pool(dpaa_bp->pool); 521 522 return err; 523 } 524 525 /* remove and free all the buffers from the given buffer pool */ 526 static void dpaa_bp_drain(struct dpaa_bp *bp) 527 { 528 u8 num = 8; 529 int ret; 530 531 do { 532 struct bm_buffer bmb[8]; 533 int i; 534 535 ret = bman_acquire(bp->pool, bmb, num); 536 if (ret < 0) { 537 if (num == 8) { 538 /* we have less than 8 buffers left; 539 * drain them one by one 540 */ 541 num = 1; 542 ret = 1; 543 continue; 544 } else { 545 /* Pool is fully drained */ 546 break; 547 } 548 } 549 550 if (bp->free_buf_cb) 551 for (i = 0; i < num; i++) 552 bp->free_buf_cb(bp, &bmb[i]); 553 } while (ret > 0); 554 } 555 556 static void dpaa_bp_free(struct dpaa_bp *dpaa_bp) 557 { 558 struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid); 559 560 /* the mapping between bpid and dpaa_bp is done very late in the 561 * allocation procedure; if something failed before the mapping, the bp 562 * was not configured, therefore we don't need the below instructions 563 */ 564 if (!bp) 565 return; 566 567 if (!refcount_dec_and_test(&bp->refs)) 568 return; 569 570 if (bp->free_buf_cb) 571 dpaa_bp_drain(bp); 572 573 dpaa_bp_array[bp->bpid] = NULL; 574 bman_free_pool(bp->pool); 575 } 576 577 static void dpaa_bps_free(struct dpaa_priv *priv) 578 { 579 dpaa_bp_free(priv->dpaa_bp); 580 } 581 582 /* Use multiple WQs for FQ assignment: 583 * - Tx Confirmation queues go to WQ1. 584 * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance 585 * to be scheduled, in case there are many more FQs in WQ6). 586 * - Rx Default goes to WQ6. 587 * - Tx queues go to different WQs depending on their priority. Equal 588 * chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and 589 * WQ0 (highest priority). 590 * This ensures that Tx-confirmed buffers are timely released. In particular, 591 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they 592 * are greatly outnumbered by other FQs in the system, while 593 * dequeue scheduling is round-robin. 594 */ 595 static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx) 596 { 597 switch (fq->fq_type) { 598 case FQ_TYPE_TX_CONFIRM: 599 case FQ_TYPE_TX_CONF_MQ: 600 fq->wq = 1; 601 break; 602 case FQ_TYPE_RX_ERROR: 603 case FQ_TYPE_TX_ERROR: 604 fq->wq = 5; 605 break; 606 case FQ_TYPE_RX_DEFAULT: 607 case FQ_TYPE_RX_PCD: 608 fq->wq = 6; 609 break; 610 case FQ_TYPE_TX: 611 switch (idx / DPAA_TC_TXQ_NUM) { 612 case 0: 613 /* Low priority (best effort) */ 614 fq->wq = 6; 615 break; 616 case 1: 617 /* Medium priority */ 618 fq->wq = 2; 619 break; 620 case 2: 621 /* High priority */ 622 fq->wq = 1; 623 break; 624 case 3: 625 /* Very high priority */ 626 fq->wq = 0; 627 break; 628 default: 629 WARN(1, "Too many TX FQs: more than %d!\n", 630 DPAA_ETH_TXQ_NUM); 631 } 632 break; 633 default: 634 WARN(1, "Invalid FQ type %d for FQID %d!\n", 635 fq->fq_type, fq->fqid); 636 } 637 } 638 639 static struct dpaa_fq *dpaa_fq_alloc(struct device *dev, 640 u32 start, u32 count, 641 struct list_head *list, 642 enum dpaa_fq_type fq_type) 643 { 644 struct dpaa_fq *dpaa_fq; 645 int i; 646 647 dpaa_fq = devm_kcalloc(dev, count, sizeof(*dpaa_fq), 648 GFP_KERNEL); 649 if (!dpaa_fq) 650 return NULL; 651 652 for (i = 0; i < count; i++) { 653 dpaa_fq[i].fq_type = fq_type; 654 dpaa_fq[i].fqid = start ? start + i : 0; 655 list_add_tail(&dpaa_fq[i].list, list); 656 } 657 658 for (i = 0; i < count; i++) 659 dpaa_assign_wq(dpaa_fq + i, i); 660 661 return dpaa_fq; 662 } 663 664 static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list, 665 struct fm_port_fqs *port_fqs) 666 { 667 struct dpaa_fq *dpaa_fq; 668 u32 fq_base, fq_base_aligned, i; 669 670 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR); 671 if (!dpaa_fq) 672 goto fq_alloc_failed; 673 674 port_fqs->rx_errq = &dpaa_fq[0]; 675 676 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT); 677 if (!dpaa_fq) 678 goto fq_alloc_failed; 679 680 port_fqs->rx_defq = &dpaa_fq[0]; 681 682 /* the PCD FQIDs range needs to be aligned for correct operation */ 683 if (qman_alloc_fqid_range(&fq_base, 2 * DPAA_ETH_PCD_RXQ_NUM)) 684 goto fq_alloc_failed; 685 686 fq_base_aligned = ALIGN(fq_base, DPAA_ETH_PCD_RXQ_NUM); 687 688 for (i = fq_base; i < fq_base_aligned; i++) 689 qman_release_fqid(i); 690 691 for (i = fq_base_aligned + DPAA_ETH_PCD_RXQ_NUM; 692 i < (fq_base + 2 * DPAA_ETH_PCD_RXQ_NUM); i++) 693 qman_release_fqid(i); 694 695 dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM, 696 list, FQ_TYPE_RX_PCD); 697 if (!dpaa_fq) 698 goto fq_alloc_failed; 699 700 port_fqs->rx_pcdq = &dpaa_fq[0]; 701 702 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ)) 703 goto fq_alloc_failed; 704 705 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR); 706 if (!dpaa_fq) 707 goto fq_alloc_failed; 708 709 port_fqs->tx_errq = &dpaa_fq[0]; 710 711 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM); 712 if (!dpaa_fq) 713 goto fq_alloc_failed; 714 715 port_fqs->tx_defq = &dpaa_fq[0]; 716 717 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX)) 718 goto fq_alloc_failed; 719 720 return 0; 721 722 fq_alloc_failed: 723 dev_err(dev, "dpaa_fq_alloc() failed\n"); 724 return -ENOMEM; 725 } 726 727 static u32 rx_pool_channel; 728 static DEFINE_SPINLOCK(rx_pool_channel_init); 729 730 static int dpaa_get_channel(void) 731 { 732 spin_lock(&rx_pool_channel_init); 733 if (!rx_pool_channel) { 734 u32 pool; 735 int ret; 736 737 ret = qman_alloc_pool(&pool); 738 739 if (!ret) 740 rx_pool_channel = pool; 741 } 742 spin_unlock(&rx_pool_channel_init); 743 if (!rx_pool_channel) 744 return -ENOMEM; 745 return rx_pool_channel; 746 } 747 748 static void dpaa_release_channel(void) 749 { 750 qman_release_pool(rx_pool_channel); 751 } 752 753 static void dpaa_eth_add_channel(u16 channel, struct device *dev) 754 { 755 u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel); 756 const cpumask_t *cpus = qman_affine_cpus(); 757 struct qman_portal *portal; 758 int cpu; 759 760 for_each_cpu_and(cpu, cpus, cpu_online_mask) { 761 portal = qman_get_affine_portal(cpu); 762 qman_p_static_dequeue_add(portal, pool); 763 qman_start_using_portal(portal, dev); 764 } 765 } 766 767 /* Congestion group state change notification callback. 768 * Stops the device's egress queues while they are congested and 769 * wakes them upon exiting congested state. 770 * Also updates some CGR-related stats. 771 */ 772 static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr, 773 int congested) 774 { 775 struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr, 776 struct dpaa_priv, cgr_data.cgr); 777 778 if (congested) { 779 priv->cgr_data.congestion_start_jiffies = jiffies; 780 netif_tx_stop_all_queues(priv->net_dev); 781 priv->cgr_data.cgr_congested_count++; 782 } else { 783 priv->cgr_data.congested_jiffies += 784 (jiffies - priv->cgr_data.congestion_start_jiffies); 785 netif_tx_wake_all_queues(priv->net_dev); 786 } 787 } 788 789 static int dpaa_eth_cgr_init(struct dpaa_priv *priv) 790 { 791 struct qm_mcc_initcgr initcgr; 792 u32 cs_th; 793 int err; 794 795 err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid); 796 if (err < 0) { 797 if (netif_msg_drv(priv)) 798 pr_err("%s: Error %d allocating CGR ID\n", 799 __func__, err); 800 goto out_error; 801 } 802 priv->cgr_data.cgr.cb = dpaa_eth_cgscn; 803 804 /* Enable Congestion State Change Notifications and CS taildrop */ 805 memset(&initcgr, 0, sizeof(initcgr)); 806 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES); 807 initcgr.cgr.cscn_en = QM_CGR_EN; 808 809 /* Set different thresholds based on the MAC speed. 810 * This may turn suboptimal if the MAC is reconfigured at a speed 811 * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link. 812 * In such cases, we ought to reconfigure the threshold, too. 813 */ 814 if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full) 815 cs_th = DPAA_CS_THRESHOLD_10G; 816 else 817 cs_th = DPAA_CS_THRESHOLD_1G; 818 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); 819 820 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN); 821 initcgr.cgr.cstd_en = QM_CGR_EN; 822 823 err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT, 824 &initcgr); 825 if (err < 0) { 826 if (netif_msg_drv(priv)) 827 pr_err("%s: Error %d creating CGR with ID %d\n", 828 __func__, err, priv->cgr_data.cgr.cgrid); 829 qman_release_cgrid(priv->cgr_data.cgr.cgrid); 830 goto out_error; 831 } 832 if (netif_msg_drv(priv)) 833 pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n", 834 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr, 835 priv->cgr_data.cgr.chan); 836 837 out_error: 838 return err; 839 } 840 841 static inline void dpaa_setup_ingress(const struct dpaa_priv *priv, 842 struct dpaa_fq *fq, 843 const struct qman_fq *template) 844 { 845 fq->fq_base = *template; 846 fq->net_dev = priv->net_dev; 847 848 fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE; 849 fq->channel = priv->channel; 850 } 851 852 static inline void dpaa_setup_egress(const struct dpaa_priv *priv, 853 struct dpaa_fq *fq, 854 struct fman_port *port, 855 const struct qman_fq *template) 856 { 857 fq->fq_base = *template; 858 fq->net_dev = priv->net_dev; 859 860 if (port) { 861 fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL; 862 fq->channel = (u16)fman_port_get_qman_channel_id(port); 863 } else { 864 fq->flags = QMAN_FQ_FLAG_NO_MODIFY; 865 } 866 } 867 868 static void dpaa_fq_setup(struct dpaa_priv *priv, 869 const struct dpaa_fq_cbs *fq_cbs, 870 struct fman_port *tx_port) 871 { 872 int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu; 873 const cpumask_t *affine_cpus = qman_affine_cpus(); 874 u16 channels[NR_CPUS]; 875 struct dpaa_fq *fq; 876 877 for_each_cpu_and(cpu, affine_cpus, cpu_online_mask) 878 channels[num_portals++] = qman_affine_channel(cpu); 879 880 if (num_portals == 0) 881 dev_err(priv->net_dev->dev.parent, 882 "No Qman software (affine) channels found\n"); 883 884 /* Initialize each FQ in the list */ 885 list_for_each_entry(fq, &priv->dpaa_fq_list, list) { 886 switch (fq->fq_type) { 887 case FQ_TYPE_RX_DEFAULT: 888 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq); 889 break; 890 case FQ_TYPE_RX_ERROR: 891 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq); 892 break; 893 case FQ_TYPE_RX_PCD: 894 if (!num_portals) 895 continue; 896 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq); 897 fq->channel = channels[portal_cnt++ % num_portals]; 898 break; 899 case FQ_TYPE_TX: 900 dpaa_setup_egress(priv, fq, tx_port, 901 &fq_cbs->egress_ern); 902 /* If we have more Tx queues than the number of cores, 903 * just ignore the extra ones. 904 */ 905 if (egress_cnt < DPAA_ETH_TXQ_NUM) 906 priv->egress_fqs[egress_cnt++] = &fq->fq_base; 907 break; 908 case FQ_TYPE_TX_CONF_MQ: 909 priv->conf_fqs[conf_cnt++] = &fq->fq_base; 910 /* fall through */ 911 case FQ_TYPE_TX_CONFIRM: 912 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq); 913 break; 914 case FQ_TYPE_TX_ERROR: 915 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq); 916 break; 917 default: 918 dev_warn(priv->net_dev->dev.parent, 919 "Unknown FQ type detected!\n"); 920 break; 921 } 922 } 923 924 /* Make sure all CPUs receive a corresponding Tx queue. */ 925 while (egress_cnt < DPAA_ETH_TXQ_NUM) { 926 list_for_each_entry(fq, &priv->dpaa_fq_list, list) { 927 if (fq->fq_type != FQ_TYPE_TX) 928 continue; 929 priv->egress_fqs[egress_cnt++] = &fq->fq_base; 930 if (egress_cnt == DPAA_ETH_TXQ_NUM) 931 break; 932 } 933 } 934 } 935 936 static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv, 937 struct qman_fq *tx_fq) 938 { 939 int i; 940 941 for (i = 0; i < DPAA_ETH_TXQ_NUM; i++) 942 if (priv->egress_fqs[i] == tx_fq) 943 return i; 944 945 return -EINVAL; 946 } 947 948 static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable) 949 { 950 const struct dpaa_priv *priv; 951 struct qman_fq *confq = NULL; 952 struct qm_mcc_initfq initfq; 953 struct device *dev; 954 struct qman_fq *fq; 955 int queue_id; 956 int err; 957 958 priv = netdev_priv(dpaa_fq->net_dev); 959 dev = dpaa_fq->net_dev->dev.parent; 960 961 if (dpaa_fq->fqid == 0) 962 dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; 963 964 dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY); 965 966 err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base); 967 if (err) { 968 dev_err(dev, "qman_create_fq() failed\n"); 969 return err; 970 } 971 fq = &dpaa_fq->fq_base; 972 973 if (dpaa_fq->init) { 974 memset(&initfq, 0, sizeof(initfq)); 975 976 initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL); 977 /* Note: we may get to keep an empty FQ in cache */ 978 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE); 979 980 /* Try to reduce the number of portal interrupts for 981 * Tx Confirmation FQs. 982 */ 983 if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM) 984 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK); 985 986 /* FQ placement */ 987 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ); 988 989 qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq); 990 991 /* Put all egress queues in a congestion group of their own. 992 * Sensu stricto, the Tx confirmation queues are Rx FQs, 993 * rather than Tx - but they nonetheless account for the 994 * memory footprint on behalf of egress traffic. We therefore 995 * place them in the netdev's CGR, along with the Tx FQs. 996 */ 997 if (dpaa_fq->fq_type == FQ_TYPE_TX || 998 dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM || 999 dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) { 1000 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID); 1001 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE); 1002 initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid; 1003 /* Set a fixed overhead accounting, in an attempt to 1004 * reduce the impact of fixed-size skb shells and the 1005 * driver's needed headroom on system memory. This is 1006 * especially the case when the egress traffic is 1007 * composed of small datagrams. 1008 * Unfortunately, QMan's OAL value is capped to an 1009 * insufficient value, but even that is better than 1010 * no overhead accounting at all. 1011 */ 1012 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC); 1013 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); 1014 qm_fqd_set_oal(&initfq.fqd, 1015 min(sizeof(struct sk_buff) + 1016 priv->tx_headroom, 1017 (size_t)FSL_QMAN_MAX_OAL)); 1018 } 1019 1020 if (td_enable) { 1021 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH); 1022 qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1); 1023 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE); 1024 } 1025 1026 if (dpaa_fq->fq_type == FQ_TYPE_TX) { 1027 queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base); 1028 if (queue_id >= 0) 1029 confq = priv->conf_fqs[queue_id]; 1030 if (confq) { 1031 initfq.we_mask |= 1032 cpu_to_be16(QM_INITFQ_WE_CONTEXTA); 1033 /* ContextA: OVOM=1(use contextA2 bits instead of ICAD) 1034 * A2V=1 (contextA A2 field is valid) 1035 * A0V=1 (contextA A0 field is valid) 1036 * B0V=1 (contextB field is valid) 1037 * ContextA A2: EBD=1 (deallocate buffers inside FMan) 1038 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID) 1039 */ 1040 qm_fqd_context_a_set64(&initfq.fqd, 1041 0x1e00000080000000ULL); 1042 } 1043 } 1044 1045 /* Put all the ingress queues in our "ingress CGR". */ 1046 if (priv->use_ingress_cgr && 1047 (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || 1048 dpaa_fq->fq_type == FQ_TYPE_RX_ERROR || 1049 dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) { 1050 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID); 1051 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE); 1052 initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid; 1053 /* Set a fixed overhead accounting, just like for the 1054 * egress CGR. 1055 */ 1056 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC); 1057 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); 1058 qm_fqd_set_oal(&initfq.fqd, 1059 min(sizeof(struct sk_buff) + 1060 priv->tx_headroom, 1061 (size_t)FSL_QMAN_MAX_OAL)); 1062 } 1063 1064 /* Initialization common to all ingress queues */ 1065 if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { 1066 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA); 1067 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE | 1068 QM_FQCTRL_CTXASTASHING); 1069 initfq.fqd.context_a.stashing.exclusive = 1070 QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX | 1071 QM_STASHING_EXCL_ANNOTATION; 1072 qm_fqd_set_stashing(&initfq.fqd, 1, 2, 1073 DIV_ROUND_UP(sizeof(struct qman_fq), 1074 64)); 1075 } 1076 1077 err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq); 1078 if (err < 0) { 1079 dev_err(dev, "qman_init_fq(%u) = %d\n", 1080 qman_fq_fqid(fq), err); 1081 qman_destroy_fq(fq); 1082 return err; 1083 } 1084 } 1085 1086 dpaa_fq->fqid = qman_fq_fqid(fq); 1087 1088 return 0; 1089 } 1090 1091 static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq) 1092 { 1093 const struct dpaa_priv *priv; 1094 struct dpaa_fq *dpaa_fq; 1095 int err, error; 1096 1097 err = 0; 1098 1099 dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); 1100 priv = netdev_priv(dpaa_fq->net_dev); 1101 1102 if (dpaa_fq->init) { 1103 err = qman_retire_fq(fq, NULL); 1104 if (err < 0 && netif_msg_drv(priv)) 1105 dev_err(dev, "qman_retire_fq(%u) = %d\n", 1106 qman_fq_fqid(fq), err); 1107 1108 error = qman_oos_fq(fq); 1109 if (error < 0 && netif_msg_drv(priv)) { 1110 dev_err(dev, "qman_oos_fq(%u) = %d\n", 1111 qman_fq_fqid(fq), error); 1112 if (err >= 0) 1113 err = error; 1114 } 1115 } 1116 1117 qman_destroy_fq(fq); 1118 list_del(&dpaa_fq->list); 1119 1120 return err; 1121 } 1122 1123 static int dpaa_fq_free(struct device *dev, struct list_head *list) 1124 { 1125 struct dpaa_fq *dpaa_fq, *tmp; 1126 int err, error; 1127 1128 err = 0; 1129 list_for_each_entry_safe(dpaa_fq, tmp, list, list) { 1130 error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq); 1131 if (error < 0 && err >= 0) 1132 err = error; 1133 } 1134 1135 return err; 1136 } 1137 1138 static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq, 1139 struct dpaa_fq *defq, 1140 struct dpaa_buffer_layout *buf_layout) 1141 { 1142 struct fman_buffer_prefix_content buf_prefix_content; 1143 struct fman_port_params params; 1144 int err; 1145 1146 memset(¶ms, 0, sizeof(params)); 1147 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content)); 1148 1149 buf_prefix_content.priv_data_size = buf_layout->priv_data_size; 1150 buf_prefix_content.pass_prs_result = true; 1151 buf_prefix_content.pass_hash_result = true; 1152 buf_prefix_content.pass_time_stamp = true; 1153 buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; 1154 1155 params.specific_params.non_rx_params.err_fqid = errq->fqid; 1156 params.specific_params.non_rx_params.dflt_fqid = defq->fqid; 1157 1158 err = fman_port_config(port, ¶ms); 1159 if (err) { 1160 pr_err("%s: fman_port_config failed\n", __func__); 1161 return err; 1162 } 1163 1164 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); 1165 if (err) { 1166 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n", 1167 __func__); 1168 return err; 1169 } 1170 1171 err = fman_port_init(port); 1172 if (err) 1173 pr_err("%s: fm_port_init failed\n", __func__); 1174 1175 return err; 1176 } 1177 1178 static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp, 1179 struct dpaa_fq *errq, 1180 struct dpaa_fq *defq, struct dpaa_fq *pcdq, 1181 struct dpaa_buffer_layout *buf_layout) 1182 { 1183 struct fman_buffer_prefix_content buf_prefix_content; 1184 struct fman_port_rx_params *rx_p; 1185 struct fman_port_params params; 1186 int err; 1187 1188 memset(¶ms, 0, sizeof(params)); 1189 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content)); 1190 1191 buf_prefix_content.priv_data_size = buf_layout->priv_data_size; 1192 buf_prefix_content.pass_prs_result = true; 1193 buf_prefix_content.pass_hash_result = true; 1194 buf_prefix_content.pass_time_stamp = true; 1195 buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; 1196 1197 rx_p = ¶ms.specific_params.rx_params; 1198 rx_p->err_fqid = errq->fqid; 1199 rx_p->dflt_fqid = defq->fqid; 1200 if (pcdq) { 1201 rx_p->pcd_base_fqid = pcdq->fqid; 1202 rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM; 1203 } 1204 1205 rx_p->ext_buf_pools.num_of_pools_used = 1; 1206 rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid; 1207 rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size; 1208 1209 err = fman_port_config(port, ¶ms); 1210 if (err) { 1211 pr_err("%s: fman_port_config failed\n", __func__); 1212 return err; 1213 } 1214 1215 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); 1216 if (err) { 1217 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n", 1218 __func__); 1219 return err; 1220 } 1221 1222 err = fman_port_init(port); 1223 if (err) 1224 pr_err("%s: fm_port_init failed\n", __func__); 1225 1226 return err; 1227 } 1228 1229 static int dpaa_eth_init_ports(struct mac_device *mac_dev, 1230 struct dpaa_bp *bp, 1231 struct fm_port_fqs *port_fqs, 1232 struct dpaa_buffer_layout *buf_layout, 1233 struct device *dev) 1234 { 1235 struct fman_port *rxport = mac_dev->port[RX]; 1236 struct fman_port *txport = mac_dev->port[TX]; 1237 int err; 1238 1239 err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq, 1240 port_fqs->tx_defq, &buf_layout[TX]); 1241 if (err) 1242 return err; 1243 1244 err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq, 1245 port_fqs->rx_defq, port_fqs->rx_pcdq, 1246 &buf_layout[RX]); 1247 1248 return err; 1249 } 1250 1251 static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp, 1252 struct bm_buffer *bmb, int cnt) 1253 { 1254 int err; 1255 1256 err = bman_release(dpaa_bp->pool, bmb, cnt); 1257 /* Should never occur, address anyway to avoid leaking the buffers */ 1258 if (WARN_ON(err) && dpaa_bp->free_buf_cb) 1259 while (cnt-- > 0) 1260 dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]); 1261 1262 return cnt; 1263 } 1264 1265 static void dpaa_release_sgt_members(struct qm_sg_entry *sgt) 1266 { 1267 struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX]; 1268 struct dpaa_bp *dpaa_bp; 1269 int i = 0, j; 1270 1271 memset(bmb, 0, sizeof(bmb)); 1272 1273 do { 1274 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); 1275 if (!dpaa_bp) 1276 return; 1277 1278 j = 0; 1279 do { 1280 WARN_ON(qm_sg_entry_is_ext(&sgt[i])); 1281 1282 bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i])); 1283 1284 j++; i++; 1285 } while (j < ARRAY_SIZE(bmb) && 1286 !qm_sg_entry_is_final(&sgt[i - 1]) && 1287 sgt[i - 1].bpid == sgt[i].bpid); 1288 1289 dpaa_bman_release(dpaa_bp, bmb, j); 1290 } while (!qm_sg_entry_is_final(&sgt[i - 1])); 1291 } 1292 1293 static void dpaa_fd_release(const struct net_device *net_dev, 1294 const struct qm_fd *fd) 1295 { 1296 struct qm_sg_entry *sgt; 1297 struct dpaa_bp *dpaa_bp; 1298 struct bm_buffer bmb; 1299 dma_addr_t addr; 1300 void *vaddr; 1301 1302 bmb.data = 0; 1303 bm_buffer_set64(&bmb, qm_fd_addr(fd)); 1304 1305 dpaa_bp = dpaa_bpid2pool(fd->bpid); 1306 if (!dpaa_bp) 1307 return; 1308 1309 if (qm_fd_get_format(fd) == qm_fd_sg) { 1310 vaddr = phys_to_virt(qm_fd_addr(fd)); 1311 sgt = vaddr + qm_fd_get_offset(fd); 1312 1313 dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd), 1314 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); 1315 1316 dpaa_release_sgt_members(sgt); 1317 1318 addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, 1319 virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE, 1320 DMA_FROM_DEVICE); 1321 if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) { 1322 netdev_err(net_dev, "DMA mapping failed\n"); 1323 return; 1324 } 1325 bm_buffer_set64(&bmb, addr); 1326 } 1327 1328 dpaa_bman_release(dpaa_bp, &bmb, 1); 1329 } 1330 1331 static void count_ern(struct dpaa_percpu_priv *percpu_priv, 1332 const union qm_mr_entry *msg) 1333 { 1334 switch (msg->ern.rc & QM_MR_RC_MASK) { 1335 case QM_MR_RC_CGR_TAILDROP: 1336 percpu_priv->ern_cnt.cg_tdrop++; 1337 break; 1338 case QM_MR_RC_WRED: 1339 percpu_priv->ern_cnt.wred++; 1340 break; 1341 case QM_MR_RC_ERROR: 1342 percpu_priv->ern_cnt.err_cond++; 1343 break; 1344 case QM_MR_RC_ORPWINDOW_EARLY: 1345 percpu_priv->ern_cnt.early_window++; 1346 break; 1347 case QM_MR_RC_ORPWINDOW_LATE: 1348 percpu_priv->ern_cnt.late_window++; 1349 break; 1350 case QM_MR_RC_FQ_TAILDROP: 1351 percpu_priv->ern_cnt.fq_tdrop++; 1352 break; 1353 case QM_MR_RC_ORPWINDOW_RETIRED: 1354 percpu_priv->ern_cnt.fq_retired++; 1355 break; 1356 case QM_MR_RC_ORP_ZERO: 1357 percpu_priv->ern_cnt.orp_zero++; 1358 break; 1359 } 1360 } 1361 1362 /* Turn on HW checksum computation for this outgoing frame. 1363 * If the current protocol is not something we support in this regard 1364 * (or if the stack has already computed the SW checksum), we do nothing. 1365 * 1366 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value 1367 * otherwise. 1368 * 1369 * Note that this function may modify the fd->cmd field and the skb data buffer 1370 * (the Parse Results area). 1371 */ 1372 static int dpaa_enable_tx_csum(struct dpaa_priv *priv, 1373 struct sk_buff *skb, 1374 struct qm_fd *fd, 1375 void *parse_results) 1376 { 1377 struct fman_prs_result *parse_result; 1378 u16 ethertype = ntohs(skb->protocol); 1379 struct ipv6hdr *ipv6h = NULL; 1380 struct iphdr *iph; 1381 int retval = 0; 1382 u8 l4_proto; 1383 1384 if (skb->ip_summed != CHECKSUM_PARTIAL) 1385 return 0; 1386 1387 /* Note: L3 csum seems to be already computed in sw, but we can't choose 1388 * L4 alone from the FM configuration anyway. 1389 */ 1390 1391 /* Fill in some fields of the Parse Results array, so the FMan 1392 * can find them as if they came from the FMan Parser. 1393 */ 1394 parse_result = (struct fman_prs_result *)parse_results; 1395 1396 /* If we're dealing with VLAN, get the real Ethernet type */ 1397 if (ethertype == ETH_P_8021Q) { 1398 /* We can't always assume the MAC header is set correctly 1399 * by the stack, so reset to beginning of skb->data 1400 */ 1401 skb_reset_mac_header(skb); 1402 ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); 1403 } 1404 1405 /* Fill in the relevant L3 parse result fields 1406 * and read the L4 protocol type 1407 */ 1408 switch (ethertype) { 1409 case ETH_P_IP: 1410 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4); 1411 iph = ip_hdr(skb); 1412 WARN_ON(!iph); 1413 l4_proto = iph->protocol; 1414 break; 1415 case ETH_P_IPV6: 1416 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6); 1417 ipv6h = ipv6_hdr(skb); 1418 WARN_ON(!ipv6h); 1419 l4_proto = ipv6h->nexthdr; 1420 break; 1421 default: 1422 /* We shouldn't even be here */ 1423 if (net_ratelimit()) 1424 netif_alert(priv, tx_err, priv->net_dev, 1425 "Can't compute HW csum for L3 proto 0x%x\n", 1426 ntohs(skb->protocol)); 1427 retval = -EIO; 1428 goto return_error; 1429 } 1430 1431 /* Fill in the relevant L4 parse result fields */ 1432 switch (l4_proto) { 1433 case IPPROTO_UDP: 1434 parse_result->l4r = FM_L4_PARSE_RESULT_UDP; 1435 break; 1436 case IPPROTO_TCP: 1437 parse_result->l4r = FM_L4_PARSE_RESULT_TCP; 1438 break; 1439 default: 1440 if (net_ratelimit()) 1441 netif_alert(priv, tx_err, priv->net_dev, 1442 "Can't compute HW csum for L4 proto 0x%x\n", 1443 l4_proto); 1444 retval = -EIO; 1445 goto return_error; 1446 } 1447 1448 /* At index 0 is IPOffset_1 as defined in the Parse Results */ 1449 parse_result->ip_off[0] = (u8)skb_network_offset(skb); 1450 parse_result->l4_off = (u8)skb_transport_offset(skb); 1451 1452 /* Enable L3 (and L4, if TCP or UDP) HW checksum. */ 1453 fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC); 1454 1455 /* On P1023 and similar platforms fd->cmd interpretation could 1456 * be disabled by setting CONTEXT_A bit ICMD; currently this bit 1457 * is not set so we do not need to check; in the future, if/when 1458 * using context_a we need to check this bit 1459 */ 1460 1461 return_error: 1462 return retval; 1463 } 1464 1465 static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp) 1466 { 1467 struct net_device *net_dev = dpaa_bp->priv->net_dev; 1468 struct bm_buffer bmb[8]; 1469 dma_addr_t addr; 1470 struct page *p; 1471 u8 i; 1472 1473 for (i = 0; i < 8; i++) { 1474 p = dev_alloc_pages(0); 1475 if (unlikely(!p)) { 1476 netdev_err(net_dev, "dev_alloc_pages() failed\n"); 1477 goto release_previous_buffs; 1478 } 1479 1480 addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0, 1481 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); 1482 if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev, 1483 addr))) { 1484 netdev_err(net_dev, "DMA map failed\n"); 1485 goto release_previous_buffs; 1486 } 1487 1488 bmb[i].data = 0; 1489 bm_buffer_set64(&bmb[i], addr); 1490 } 1491 1492 release_bufs: 1493 return dpaa_bman_release(dpaa_bp, bmb, i); 1494 1495 release_previous_buffs: 1496 WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n"); 1497 1498 bm_buffer_set64(&bmb[i], 0); 1499 /* Avoid releasing a completely null buffer; bman_release() requires 1500 * at least one buffer. 1501 */ 1502 if (likely(i)) 1503 goto release_bufs; 1504 1505 return 0; 1506 } 1507 1508 static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp) 1509 { 1510 int i; 1511 1512 /* Give each CPU an allotment of "config_count" buffers */ 1513 for_each_possible_cpu(i) { 1514 int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i); 1515 int j; 1516 1517 /* Although we access another CPU's counters here 1518 * we do it at boot time so it is safe 1519 */ 1520 for (j = 0; j < dpaa_bp->config_count; j += 8) 1521 *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp); 1522 } 1523 return 0; 1524 } 1525 1526 /* Add buffers/(pages) for Rx processing whenever bpool count falls below 1527 * REFILL_THRESHOLD. 1528 */ 1529 static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr) 1530 { 1531 int count = *countptr; 1532 int new_bufs; 1533 1534 if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) { 1535 do { 1536 new_bufs = dpaa_bp_add_8_bufs(dpaa_bp); 1537 if (unlikely(!new_bufs)) { 1538 /* Avoid looping forever if we've temporarily 1539 * run out of memory. We'll try again at the 1540 * next NAPI cycle. 1541 */ 1542 break; 1543 } 1544 count += new_bufs; 1545 } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT); 1546 1547 *countptr = count; 1548 if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT)) 1549 return -ENOMEM; 1550 } 1551 1552 return 0; 1553 } 1554 1555 static int dpaa_eth_refill_bpools(struct dpaa_priv *priv) 1556 { 1557 struct dpaa_bp *dpaa_bp; 1558 int *countptr; 1559 int res; 1560 1561 dpaa_bp = priv->dpaa_bp; 1562 if (!dpaa_bp) 1563 return -EINVAL; 1564 countptr = this_cpu_ptr(dpaa_bp->percpu_count); 1565 res = dpaa_eth_refill_bpool(dpaa_bp, countptr); 1566 if (res) 1567 return res; 1568 1569 return 0; 1570 } 1571 1572 /* Cleanup function for outgoing frame descriptors that were built on Tx path, 1573 * either contiguous frames or scatter/gather ones. 1574 * Skb freeing is not handled here. 1575 * 1576 * This function may be called on error paths in the Tx function, so guard 1577 * against cases when not all fd relevant fields were filled in. To avoid 1578 * reading the invalid transmission timestamp for the error paths set ts to 1579 * false. 1580 * 1581 * Return the skb backpointer, since for S/G frames the buffer containing it 1582 * gets freed here. 1583 */ 1584 static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, 1585 const struct qm_fd *fd, bool ts) 1586 { 1587 const enum dma_data_direction dma_dir = DMA_TO_DEVICE; 1588 struct device *dev = priv->net_dev->dev.parent; 1589 struct skb_shared_hwtstamps shhwtstamps; 1590 dma_addr_t addr = qm_fd_addr(fd); 1591 void *vaddr = phys_to_virt(addr); 1592 const struct qm_sg_entry *sgt; 1593 struct sk_buff *skb; 1594 u64 ns; 1595 int i; 1596 1597 if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { 1598 dma_unmap_page(priv->tx_dma_dev, addr, 1599 qm_fd_get_offset(fd) + DPAA_SGT_SIZE, 1600 dma_dir); 1601 1602 /* The sgt buffer has been allocated with netdev_alloc_frag(), 1603 * it's from lowmem. 1604 */ 1605 sgt = vaddr + qm_fd_get_offset(fd); 1606 1607 /* sgt[0] is from lowmem, was dma_map_single()-ed */ 1608 dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]), 1609 qm_sg_entry_get_len(&sgt[0]), dma_dir); 1610 1611 /* remaining pages were mapped with skb_frag_dma_map() */ 1612 for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) && 1613 !qm_sg_entry_is_final(&sgt[i - 1]); i++) { 1614 WARN_ON(qm_sg_entry_is_ext(&sgt[i])); 1615 1616 dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]), 1617 qm_sg_entry_get_len(&sgt[i]), dma_dir); 1618 } 1619 } else { 1620 dma_unmap_single(priv->tx_dma_dev, addr, 1621 priv->tx_headroom + qm_fd_get_length(fd), 1622 dma_dir); 1623 } 1624 1625 skb = *(struct sk_buff **)vaddr; 1626 1627 /* DMA unmapping is required before accessing the HW provided info */ 1628 if (ts && priv->tx_tstamp && 1629 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 1630 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 1631 1632 if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr, 1633 &ns)) { 1634 shhwtstamps.hwtstamp = ns_to_ktime(ns); 1635 skb_tstamp_tx(skb, &shhwtstamps); 1636 } else { 1637 dev_warn(dev, "fman_port_get_tstamp failed!\n"); 1638 } 1639 } 1640 1641 if (qm_fd_get_format(fd) == qm_fd_sg) 1642 /* Free the page that we allocated on Tx for the SGT */ 1643 free_pages((unsigned long)vaddr, 0); 1644 1645 return skb; 1646 } 1647 1648 static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd) 1649 { 1650 /* The parser has run and performed L4 checksum validation. 1651 * We know there were no parser errors (and implicitly no 1652 * L4 csum error), otherwise we wouldn't be here. 1653 */ 1654 if ((priv->net_dev->features & NETIF_F_RXCSUM) && 1655 (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV)) 1656 return CHECKSUM_UNNECESSARY; 1657 1658 /* We're here because either the parser didn't run or the L4 checksum 1659 * was not verified. This may include the case of a UDP frame with 1660 * checksum zero or an L4 proto other than TCP/UDP 1661 */ 1662 return CHECKSUM_NONE; 1663 } 1664 1665 /* Build a linear skb around the received buffer. 1666 * We are guaranteed there is enough room at the end of the data buffer to 1667 * accommodate the shared info area of the skb. 1668 */ 1669 static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv, 1670 const struct qm_fd *fd) 1671 { 1672 ssize_t fd_off = qm_fd_get_offset(fd); 1673 dma_addr_t addr = qm_fd_addr(fd); 1674 struct dpaa_bp *dpaa_bp; 1675 struct sk_buff *skb; 1676 void *vaddr; 1677 1678 vaddr = phys_to_virt(addr); 1679 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); 1680 1681 dpaa_bp = dpaa_bpid2pool(fd->bpid); 1682 if (!dpaa_bp) 1683 goto free_buffer; 1684 1685 skb = build_skb(vaddr, dpaa_bp->size + 1686 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 1687 if (WARN_ONCE(!skb, "Build skb failure on Rx\n")) 1688 goto free_buffer; 1689 WARN_ON(fd_off != priv->rx_headroom); 1690 skb_reserve(skb, fd_off); 1691 skb_put(skb, qm_fd_get_length(fd)); 1692 1693 skb->ip_summed = rx_csum_offload(priv, fd); 1694 1695 return skb; 1696 1697 free_buffer: 1698 free_pages((unsigned long)vaddr, 0); 1699 return NULL; 1700 } 1701 1702 /* Build an skb with the data of the first S/G entry in the linear portion and 1703 * the rest of the frame as skb fragments. 1704 * 1705 * The page fragment holding the S/G Table is recycled here. 1706 */ 1707 static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, 1708 const struct qm_fd *fd) 1709 { 1710 ssize_t fd_off = qm_fd_get_offset(fd); 1711 dma_addr_t addr = qm_fd_addr(fd); 1712 const struct qm_sg_entry *sgt; 1713 struct page *page, *head_page; 1714 struct dpaa_bp *dpaa_bp; 1715 void *vaddr, *sg_vaddr; 1716 int frag_off, frag_len; 1717 struct sk_buff *skb; 1718 dma_addr_t sg_addr; 1719 int page_offset; 1720 unsigned int sz; 1721 int *count_ptr; 1722 int i, j; 1723 1724 vaddr = phys_to_virt(addr); 1725 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); 1726 1727 /* Iterate through the SGT entries and add data buffers to the skb */ 1728 sgt = vaddr + fd_off; 1729 skb = NULL; 1730 for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) { 1731 /* Extension bit is not supported */ 1732 WARN_ON(qm_sg_entry_is_ext(&sgt[i])); 1733 1734 sg_addr = qm_sg_addr(&sgt[i]); 1735 sg_vaddr = phys_to_virt(sg_addr); 1736 WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr, 1737 SMP_CACHE_BYTES)); 1738 1739 dma_unmap_page(priv->rx_dma_dev, sg_addr, 1740 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); 1741 1742 /* We may use multiple Rx pools */ 1743 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); 1744 if (!dpaa_bp) 1745 goto free_buffers; 1746 1747 if (!skb) { 1748 sz = dpaa_bp->size + 1749 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1750 skb = build_skb(sg_vaddr, sz); 1751 if (WARN_ON(!skb)) 1752 goto free_buffers; 1753 1754 skb->ip_summed = rx_csum_offload(priv, fd); 1755 1756 /* Make sure forwarded skbs will have enough space 1757 * on Tx, if extra headers are added. 1758 */ 1759 WARN_ON(fd_off != priv->rx_headroom); 1760 skb_reserve(skb, fd_off); 1761 skb_put(skb, qm_sg_entry_get_len(&sgt[i])); 1762 } else { 1763 /* Not the first S/G entry; all data from buffer will 1764 * be added in an skb fragment; fragment index is offset 1765 * by one since first S/G entry was incorporated in the 1766 * linear part of the skb. 1767 * 1768 * Caution: 'page' may be a tail page. 1769 */ 1770 page = virt_to_page(sg_vaddr); 1771 head_page = virt_to_head_page(sg_vaddr); 1772 1773 /* Compute offset in (possibly tail) page */ 1774 page_offset = ((unsigned long)sg_vaddr & 1775 (PAGE_SIZE - 1)) + 1776 (page_address(page) - page_address(head_page)); 1777 /* page_offset only refers to the beginning of sgt[i]; 1778 * but the buffer itself may have an internal offset. 1779 */ 1780 frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset; 1781 frag_len = qm_sg_entry_get_len(&sgt[i]); 1782 /* skb_add_rx_frag() does no checking on the page; if 1783 * we pass it a tail page, we'll end up with 1784 * bad page accounting and eventually with segafults. 1785 */ 1786 skb_add_rx_frag(skb, i - 1, head_page, frag_off, 1787 frag_len, dpaa_bp->size); 1788 } 1789 1790 /* Update the pool count for the current {cpu x bpool} */ 1791 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); 1792 (*count_ptr)--; 1793 1794 if (qm_sg_entry_is_final(&sgt[i])) 1795 break; 1796 } 1797 WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n"); 1798 1799 /* free the SG table buffer */ 1800 free_pages((unsigned long)vaddr, 0); 1801 1802 return skb; 1803 1804 free_buffers: 1805 /* free all the SG entries */ 1806 for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) { 1807 sg_addr = qm_sg_addr(&sgt[j]); 1808 sg_vaddr = phys_to_virt(sg_addr); 1809 /* all pages 0..i were unmaped */ 1810 if (j > i) 1811 dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]), 1812 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); 1813 free_pages((unsigned long)sg_vaddr, 0); 1814 /* counters 0..i-1 were decremented */ 1815 if (j >= i) { 1816 dpaa_bp = dpaa_bpid2pool(sgt[j].bpid); 1817 if (dpaa_bp) { 1818 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); 1819 (*count_ptr)--; 1820 } 1821 } 1822 1823 if (qm_sg_entry_is_final(&sgt[j])) 1824 break; 1825 } 1826 /* free the SGT fragment */ 1827 free_pages((unsigned long)vaddr, 0); 1828 1829 return NULL; 1830 } 1831 1832 static int skb_to_contig_fd(struct dpaa_priv *priv, 1833 struct sk_buff *skb, struct qm_fd *fd, 1834 int *offset) 1835 { 1836 struct net_device *net_dev = priv->net_dev; 1837 enum dma_data_direction dma_dir; 1838 unsigned char *buff_start; 1839 struct sk_buff **skbh; 1840 dma_addr_t addr; 1841 int err; 1842 1843 /* We are guaranteed to have at least tx_headroom bytes 1844 * available, so just use that for offset. 1845 */ 1846 fd->bpid = FSL_DPAA_BPID_INV; 1847 buff_start = skb->data - priv->tx_headroom; 1848 dma_dir = DMA_TO_DEVICE; 1849 1850 skbh = (struct sk_buff **)buff_start; 1851 *skbh = skb; 1852 1853 /* Enable L3/L4 hardware checksum computation. 1854 * 1855 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may 1856 * need to write into the skb. 1857 */ 1858 err = dpaa_enable_tx_csum(priv, skb, fd, 1859 buff_start + DPAA_TX_PRIV_DATA_SIZE); 1860 if (unlikely(err < 0)) { 1861 if (net_ratelimit()) 1862 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", 1863 err); 1864 return err; 1865 } 1866 1867 /* Fill in the rest of the FD fields */ 1868 qm_fd_set_contig(fd, priv->tx_headroom, skb->len); 1869 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); 1870 1871 /* Map the entire buffer size that may be seen by FMan, but no more */ 1872 addr = dma_map_single(priv->tx_dma_dev, buff_start, 1873 priv->tx_headroom + skb->len, dma_dir); 1874 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { 1875 if (net_ratelimit()) 1876 netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n"); 1877 return -EINVAL; 1878 } 1879 qm_fd_addr_set64(fd, addr); 1880 1881 return 0; 1882 } 1883 1884 static int skb_to_sg_fd(struct dpaa_priv *priv, 1885 struct sk_buff *skb, struct qm_fd *fd) 1886 { 1887 const enum dma_data_direction dma_dir = DMA_TO_DEVICE; 1888 const int nr_frags = skb_shinfo(skb)->nr_frags; 1889 struct net_device *net_dev = priv->net_dev; 1890 struct qm_sg_entry *sgt; 1891 struct sk_buff **skbh; 1892 void *buff_start; 1893 skb_frag_t *frag; 1894 dma_addr_t addr; 1895 size_t frag_len; 1896 struct page *p; 1897 int i, j, err; 1898 1899 /* get a page to store the SGTable */ 1900 p = dev_alloc_pages(0); 1901 if (unlikely(!p)) { 1902 netdev_err(net_dev, "dev_alloc_pages() failed\n"); 1903 return -ENOMEM; 1904 } 1905 buff_start = page_address(p); 1906 1907 /* Enable L3/L4 hardware checksum computation. 1908 * 1909 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may 1910 * need to write into the skb. 1911 */ 1912 err = dpaa_enable_tx_csum(priv, skb, fd, 1913 buff_start + DPAA_TX_PRIV_DATA_SIZE); 1914 if (unlikely(err < 0)) { 1915 if (net_ratelimit()) 1916 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", 1917 err); 1918 goto csum_failed; 1919 } 1920 1921 /* SGT[0] is used by the linear part */ 1922 sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom); 1923 frag_len = skb_headlen(skb); 1924 qm_sg_entry_set_len(&sgt[0], frag_len); 1925 sgt[0].bpid = FSL_DPAA_BPID_INV; 1926 sgt[0].offset = 0; 1927 addr = dma_map_single(priv->tx_dma_dev, skb->data, 1928 skb_headlen(skb), dma_dir); 1929 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { 1930 netdev_err(priv->net_dev, "DMA mapping failed\n"); 1931 err = -EINVAL; 1932 goto sg0_map_failed; 1933 } 1934 qm_sg_entry_set64(&sgt[0], addr); 1935 1936 /* populate the rest of SGT entries */ 1937 for (i = 0; i < nr_frags; i++) { 1938 frag = &skb_shinfo(skb)->frags[i]; 1939 frag_len = skb_frag_size(frag); 1940 WARN_ON(!skb_frag_page(frag)); 1941 addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0, 1942 frag_len, dma_dir); 1943 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { 1944 netdev_err(priv->net_dev, "DMA mapping failed\n"); 1945 err = -EINVAL; 1946 goto sg_map_failed; 1947 } 1948 1949 qm_sg_entry_set_len(&sgt[i + 1], frag_len); 1950 sgt[i + 1].bpid = FSL_DPAA_BPID_INV; 1951 sgt[i + 1].offset = 0; 1952 1953 /* keep the offset in the address */ 1954 qm_sg_entry_set64(&sgt[i + 1], addr); 1955 } 1956 1957 /* Set the final bit in the last used entry of the SGT */ 1958 qm_sg_entry_set_f(&sgt[nr_frags], frag_len); 1959 1960 /* set fd offset to priv->tx_headroom */ 1961 qm_fd_set_sg(fd, priv->tx_headroom, skb->len); 1962 1963 /* DMA map the SGT page */ 1964 skbh = (struct sk_buff **)buff_start; 1965 *skbh = skb; 1966 1967 addr = dma_map_page(priv->tx_dma_dev, p, 0, 1968 priv->tx_headroom + DPAA_SGT_SIZE, dma_dir); 1969 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { 1970 netdev_err(priv->net_dev, "DMA mapping failed\n"); 1971 err = -EINVAL; 1972 goto sgt_map_failed; 1973 } 1974 1975 fd->bpid = FSL_DPAA_BPID_INV; 1976 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); 1977 qm_fd_addr_set64(fd, addr); 1978 1979 return 0; 1980 1981 sgt_map_failed: 1982 sg_map_failed: 1983 for (j = 0; j < i; j++) 1984 dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]), 1985 qm_sg_entry_get_len(&sgt[j]), dma_dir); 1986 sg0_map_failed: 1987 csum_failed: 1988 free_pages((unsigned long)buff_start, 0); 1989 1990 return err; 1991 } 1992 1993 static inline int dpaa_xmit(struct dpaa_priv *priv, 1994 struct rtnl_link_stats64 *percpu_stats, 1995 int queue, 1996 struct qm_fd *fd) 1997 { 1998 struct qman_fq *egress_fq; 1999 int err, i; 2000 2001 egress_fq = priv->egress_fqs[queue]; 2002 if (fd->bpid == FSL_DPAA_BPID_INV) 2003 fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue])); 2004 2005 /* Trace this Tx fd */ 2006 trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd); 2007 2008 for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) { 2009 err = qman_enqueue(egress_fq, fd); 2010 if (err != -EBUSY) 2011 break; 2012 } 2013 2014 if (unlikely(err < 0)) { 2015 percpu_stats->tx_fifo_errors++; 2016 return err; 2017 } 2018 2019 percpu_stats->tx_packets++; 2020 percpu_stats->tx_bytes += qm_fd_get_length(fd); 2021 2022 return 0; 2023 } 2024 2025 static netdev_tx_t 2026 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) 2027 { 2028 const int queue_mapping = skb_get_queue_mapping(skb); 2029 bool nonlinear = skb_is_nonlinear(skb); 2030 struct rtnl_link_stats64 *percpu_stats; 2031 struct dpaa_percpu_priv *percpu_priv; 2032 struct netdev_queue *txq; 2033 struct dpaa_priv *priv; 2034 struct qm_fd fd; 2035 int offset = 0; 2036 int err = 0; 2037 2038 priv = netdev_priv(net_dev); 2039 percpu_priv = this_cpu_ptr(priv->percpu_priv); 2040 percpu_stats = &percpu_priv->stats; 2041 2042 qm_fd_clear_fd(&fd); 2043 2044 if (!nonlinear) { 2045 /* We're going to store the skb backpointer at the beginning 2046 * of the data buffer, so we need a privately owned skb 2047 * 2048 * We've made sure skb is not shared in dev->priv_flags, 2049 * we need to verify the skb head is not cloned 2050 */ 2051 if (skb_cow_head(skb, priv->tx_headroom)) 2052 goto enomem; 2053 2054 WARN_ON(skb_is_nonlinear(skb)); 2055 } 2056 2057 /* MAX_SKB_FRAGS is equal or larger than our dpaa_SGT_MAX_ENTRIES; 2058 * make sure we don't feed FMan with more fragments than it supports. 2059 */ 2060 if (unlikely(nonlinear && 2061 (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) { 2062 /* If the egress skb contains more fragments than we support 2063 * we have no choice but to linearize it ourselves. 2064 */ 2065 if (__skb_linearize(skb)) 2066 goto enomem; 2067 2068 nonlinear = skb_is_nonlinear(skb); 2069 } 2070 2071 if (nonlinear) { 2072 /* Just create a S/G fd based on the skb */ 2073 err = skb_to_sg_fd(priv, skb, &fd); 2074 percpu_priv->tx_frag_skbuffs++; 2075 } else { 2076 /* Create a contig FD from this skb */ 2077 err = skb_to_contig_fd(priv, skb, &fd, &offset); 2078 } 2079 if (unlikely(err < 0)) 2080 goto skb_to_fd_failed; 2081 2082 txq = netdev_get_tx_queue(net_dev, queue_mapping); 2083 2084 /* LLTX requires to do our own update of trans_start */ 2085 txq->trans_start = jiffies; 2086 2087 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 2088 fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); 2089 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2090 } 2091 2092 if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) 2093 return NETDEV_TX_OK; 2094 2095 dpaa_cleanup_tx_fd(priv, &fd, false); 2096 skb_to_fd_failed: 2097 enomem: 2098 percpu_stats->tx_errors++; 2099 dev_kfree_skb(skb); 2100 return NETDEV_TX_OK; 2101 } 2102 2103 static void dpaa_rx_error(struct net_device *net_dev, 2104 const struct dpaa_priv *priv, 2105 struct dpaa_percpu_priv *percpu_priv, 2106 const struct qm_fd *fd, 2107 u32 fqid) 2108 { 2109 if (net_ratelimit()) 2110 netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n", 2111 be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS); 2112 2113 percpu_priv->stats.rx_errors++; 2114 2115 if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA) 2116 percpu_priv->rx_errors.dme++; 2117 if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL) 2118 percpu_priv->rx_errors.fpe++; 2119 if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE) 2120 percpu_priv->rx_errors.fse++; 2121 if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR) 2122 percpu_priv->rx_errors.phe++; 2123 2124 dpaa_fd_release(net_dev, fd); 2125 } 2126 2127 static void dpaa_tx_error(struct net_device *net_dev, 2128 const struct dpaa_priv *priv, 2129 struct dpaa_percpu_priv *percpu_priv, 2130 const struct qm_fd *fd, 2131 u32 fqid) 2132 { 2133 struct sk_buff *skb; 2134 2135 if (net_ratelimit()) 2136 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", 2137 be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS); 2138 2139 percpu_priv->stats.tx_errors++; 2140 2141 skb = dpaa_cleanup_tx_fd(priv, fd, false); 2142 dev_kfree_skb(skb); 2143 } 2144 2145 static int dpaa_eth_poll(struct napi_struct *napi, int budget) 2146 { 2147 struct dpaa_napi_portal *np = 2148 container_of(napi, struct dpaa_napi_portal, napi); 2149 2150 int cleaned = qman_p_poll_dqrr(np->p, budget); 2151 2152 if (cleaned < budget) { 2153 napi_complete_done(napi, cleaned); 2154 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); 2155 } else if (np->down) { 2156 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); 2157 } 2158 2159 return cleaned; 2160 } 2161 2162 static void dpaa_tx_conf(struct net_device *net_dev, 2163 const struct dpaa_priv *priv, 2164 struct dpaa_percpu_priv *percpu_priv, 2165 const struct qm_fd *fd, 2166 u32 fqid) 2167 { 2168 struct sk_buff *skb; 2169 2170 if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) { 2171 if (net_ratelimit()) 2172 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", 2173 be32_to_cpu(fd->status) & 2174 FM_FD_STAT_TX_ERRORS); 2175 2176 percpu_priv->stats.tx_errors++; 2177 } 2178 2179 percpu_priv->tx_confirm++; 2180 2181 skb = dpaa_cleanup_tx_fd(priv, fd, true); 2182 2183 consume_skb(skb); 2184 } 2185 2186 static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv, 2187 struct qman_portal *portal) 2188 { 2189 if (unlikely(in_irq() || !in_serving_softirq())) { 2190 /* Disable QMan IRQ and invoke NAPI */ 2191 qman_p_irqsource_remove(portal, QM_PIRQ_DQRI); 2192 2193 percpu_priv->np.p = portal; 2194 napi_schedule(&percpu_priv->np.napi); 2195 percpu_priv->in_interrupt++; 2196 return 1; 2197 } 2198 return 0; 2199 } 2200 2201 static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal, 2202 struct qman_fq *fq, 2203 const struct qm_dqrr_entry *dq) 2204 { 2205 struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); 2206 struct dpaa_percpu_priv *percpu_priv; 2207 struct net_device *net_dev; 2208 struct dpaa_bp *dpaa_bp; 2209 struct dpaa_priv *priv; 2210 2211 net_dev = dpaa_fq->net_dev; 2212 priv = netdev_priv(net_dev); 2213 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); 2214 if (!dpaa_bp) 2215 return qman_cb_dqrr_consume; 2216 2217 percpu_priv = this_cpu_ptr(priv->percpu_priv); 2218 2219 if (dpaa_eth_napi_schedule(percpu_priv, portal)) 2220 return qman_cb_dqrr_stop; 2221 2222 dpaa_eth_refill_bpools(priv); 2223 dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); 2224 2225 return qman_cb_dqrr_consume; 2226 } 2227 2228 static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, 2229 struct qman_fq *fq, 2230 const struct qm_dqrr_entry *dq) 2231 { 2232 struct skb_shared_hwtstamps *shhwtstamps; 2233 struct rtnl_link_stats64 *percpu_stats; 2234 struct dpaa_percpu_priv *percpu_priv; 2235 const struct qm_fd *fd = &dq->fd; 2236 dma_addr_t addr = qm_fd_addr(fd); 2237 enum qm_fd_format fd_format; 2238 struct net_device *net_dev; 2239 u32 fd_status, hash_offset; 2240 struct dpaa_bp *dpaa_bp; 2241 struct dpaa_priv *priv; 2242 unsigned int skb_len; 2243 struct sk_buff *skb; 2244 int *count_ptr; 2245 void *vaddr; 2246 u64 ns; 2247 2248 fd_status = be32_to_cpu(fd->status); 2249 fd_format = qm_fd_get_format(fd); 2250 net_dev = ((struct dpaa_fq *)fq)->net_dev; 2251 priv = netdev_priv(net_dev); 2252 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); 2253 if (!dpaa_bp) 2254 return qman_cb_dqrr_consume; 2255 2256 /* Trace the Rx fd */ 2257 trace_dpaa_rx_fd(net_dev, fq, &dq->fd); 2258 2259 percpu_priv = this_cpu_ptr(priv->percpu_priv); 2260 percpu_stats = &percpu_priv->stats; 2261 2262 if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal))) 2263 return qman_cb_dqrr_stop; 2264 2265 /* Make sure we didn't run out of buffers */ 2266 if (unlikely(dpaa_eth_refill_bpools(priv))) { 2267 /* Unable to refill the buffer pool due to insufficient 2268 * system memory. Just release the frame back into the pool, 2269 * otherwise we'll soon end up with an empty buffer pool. 2270 */ 2271 dpaa_fd_release(net_dev, &dq->fd); 2272 return qman_cb_dqrr_consume; 2273 } 2274 2275 if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) { 2276 if (net_ratelimit()) 2277 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", 2278 fd_status & FM_FD_STAT_RX_ERRORS); 2279 2280 percpu_stats->rx_errors++; 2281 dpaa_fd_release(net_dev, fd); 2282 return qman_cb_dqrr_consume; 2283 } 2284 2285 dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE, 2286 DMA_FROM_DEVICE); 2287 2288 /* prefetch the first 64 bytes of the frame or the SGT start */ 2289 vaddr = phys_to_virt(addr); 2290 prefetch(vaddr + qm_fd_get_offset(fd)); 2291 2292 /* The only FD types that we may receive are contig and S/G */ 2293 WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); 2294 2295 /* Account for either the contig buffer or the SGT buffer (depending on 2296 * which case we were in) having been removed from the pool. 2297 */ 2298 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); 2299 (*count_ptr)--; 2300 2301 if (likely(fd_format == qm_fd_contig)) 2302 skb = contig_fd_to_skb(priv, fd); 2303 else 2304 skb = sg_fd_to_skb(priv, fd); 2305 if (!skb) 2306 return qman_cb_dqrr_consume; 2307 2308 if (priv->rx_tstamp) { 2309 shhwtstamps = skb_hwtstamps(skb); 2310 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 2311 2312 if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns)) 2313 shhwtstamps->hwtstamp = ns_to_ktime(ns); 2314 else 2315 dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n"); 2316 } 2317 2318 skb->protocol = eth_type_trans(skb, net_dev); 2319 2320 if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && 2321 !fman_port_get_hash_result_offset(priv->mac_dev->port[RX], 2322 &hash_offset)) { 2323 enum pkt_hash_types type; 2324 2325 /* if L4 exists, it was used in the hash generation */ 2326 type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ? 2327 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3; 2328 skb_set_hash(skb, be32_to_cpu(*(u32 *)(vaddr + hash_offset)), 2329 type); 2330 } 2331 2332 skb_len = skb->len; 2333 2334 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) { 2335 percpu_stats->rx_dropped++; 2336 return qman_cb_dqrr_consume; 2337 } 2338 2339 percpu_stats->rx_packets++; 2340 percpu_stats->rx_bytes += skb_len; 2341 2342 return qman_cb_dqrr_consume; 2343 } 2344 2345 static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal, 2346 struct qman_fq *fq, 2347 const struct qm_dqrr_entry *dq) 2348 { 2349 struct dpaa_percpu_priv *percpu_priv; 2350 struct net_device *net_dev; 2351 struct dpaa_priv *priv; 2352 2353 net_dev = ((struct dpaa_fq *)fq)->net_dev; 2354 priv = netdev_priv(net_dev); 2355 2356 percpu_priv = this_cpu_ptr(priv->percpu_priv); 2357 2358 if (dpaa_eth_napi_schedule(percpu_priv, portal)) 2359 return qman_cb_dqrr_stop; 2360 2361 dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); 2362 2363 return qman_cb_dqrr_consume; 2364 } 2365 2366 static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal, 2367 struct qman_fq *fq, 2368 const struct qm_dqrr_entry *dq) 2369 { 2370 struct dpaa_percpu_priv *percpu_priv; 2371 struct net_device *net_dev; 2372 struct dpaa_priv *priv; 2373 2374 net_dev = ((struct dpaa_fq *)fq)->net_dev; 2375 priv = netdev_priv(net_dev); 2376 2377 /* Trace the fd */ 2378 trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd); 2379 2380 percpu_priv = this_cpu_ptr(priv->percpu_priv); 2381 2382 if (dpaa_eth_napi_schedule(percpu_priv, portal)) 2383 return qman_cb_dqrr_stop; 2384 2385 dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); 2386 2387 return qman_cb_dqrr_consume; 2388 } 2389 2390 static void egress_ern(struct qman_portal *portal, 2391 struct qman_fq *fq, 2392 const union qm_mr_entry *msg) 2393 { 2394 const struct qm_fd *fd = &msg->ern.fd; 2395 struct dpaa_percpu_priv *percpu_priv; 2396 const struct dpaa_priv *priv; 2397 struct net_device *net_dev; 2398 struct sk_buff *skb; 2399 2400 net_dev = ((struct dpaa_fq *)fq)->net_dev; 2401 priv = netdev_priv(net_dev); 2402 percpu_priv = this_cpu_ptr(priv->percpu_priv); 2403 2404 percpu_priv->stats.tx_dropped++; 2405 percpu_priv->stats.tx_fifo_errors++; 2406 count_ern(percpu_priv, msg); 2407 2408 skb = dpaa_cleanup_tx_fd(priv, fd, false); 2409 dev_kfree_skb_any(skb); 2410 } 2411 2412 static const struct dpaa_fq_cbs dpaa_fq_cbs = { 2413 .rx_defq = { .cb = { .dqrr = rx_default_dqrr } }, 2414 .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } }, 2415 .rx_errq = { .cb = { .dqrr = rx_error_dqrr } }, 2416 .tx_errq = { .cb = { .dqrr = conf_error_dqrr } }, 2417 .egress_ern = { .cb = { .ern = egress_ern } } 2418 }; 2419 2420 static void dpaa_eth_napi_enable(struct dpaa_priv *priv) 2421 { 2422 struct dpaa_percpu_priv *percpu_priv; 2423 int i; 2424 2425 for_each_online_cpu(i) { 2426 percpu_priv = per_cpu_ptr(priv->percpu_priv, i); 2427 2428 percpu_priv->np.down = 0; 2429 napi_enable(&percpu_priv->np.napi); 2430 } 2431 } 2432 2433 static void dpaa_eth_napi_disable(struct dpaa_priv *priv) 2434 { 2435 struct dpaa_percpu_priv *percpu_priv; 2436 int i; 2437 2438 for_each_online_cpu(i) { 2439 percpu_priv = per_cpu_ptr(priv->percpu_priv, i); 2440 2441 percpu_priv->np.down = 1; 2442 napi_disable(&percpu_priv->np.napi); 2443 } 2444 } 2445 2446 static void dpaa_adjust_link(struct net_device *net_dev) 2447 { 2448 struct mac_device *mac_dev; 2449 struct dpaa_priv *priv; 2450 2451 priv = netdev_priv(net_dev); 2452 mac_dev = priv->mac_dev; 2453 mac_dev->adjust_link(mac_dev); 2454 } 2455 2456 /* The Aquantia PHYs are capable of performing rate adaptation */ 2457 #define PHY_VEND_AQUANTIA 0x03a1b400 2458 2459 static int dpaa_phy_init(struct net_device *net_dev) 2460 { 2461 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 2462 struct mac_device *mac_dev; 2463 struct phy_device *phy_dev; 2464 struct dpaa_priv *priv; 2465 2466 priv = netdev_priv(net_dev); 2467 mac_dev = priv->mac_dev; 2468 2469 phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, 2470 &dpaa_adjust_link, 0, 2471 mac_dev->phy_if); 2472 if (!phy_dev) { 2473 netif_err(priv, ifup, net_dev, "init_phy() failed\n"); 2474 return -ENODEV; 2475 } 2476 2477 /* Unless the PHY is capable of rate adaptation */ 2478 if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII || 2479 ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) { 2480 /* remove any features not supported by the controller */ 2481 ethtool_convert_legacy_u32_to_link_mode(mask, 2482 mac_dev->if_support); 2483 linkmode_and(phy_dev->supported, phy_dev->supported, mask); 2484 } 2485 2486 phy_support_asym_pause(phy_dev); 2487 2488 mac_dev->phy_dev = phy_dev; 2489 net_dev->phydev = phy_dev; 2490 2491 return 0; 2492 } 2493 2494 static int dpaa_open(struct net_device *net_dev) 2495 { 2496 struct mac_device *mac_dev; 2497 struct dpaa_priv *priv; 2498 int err, i; 2499 2500 priv = netdev_priv(net_dev); 2501 mac_dev = priv->mac_dev; 2502 dpaa_eth_napi_enable(priv); 2503 2504 err = dpaa_phy_init(net_dev); 2505 if (err) 2506 goto phy_init_failed; 2507 2508 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { 2509 err = fman_port_enable(mac_dev->port[i]); 2510 if (err) 2511 goto mac_start_failed; 2512 } 2513 2514 err = priv->mac_dev->start(mac_dev); 2515 if (err < 0) { 2516 netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err); 2517 goto mac_start_failed; 2518 } 2519 2520 netif_tx_start_all_queues(net_dev); 2521 2522 return 0; 2523 2524 mac_start_failed: 2525 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) 2526 fman_port_disable(mac_dev->port[i]); 2527 2528 phy_init_failed: 2529 dpaa_eth_napi_disable(priv); 2530 2531 return err; 2532 } 2533 2534 static int dpaa_eth_stop(struct net_device *net_dev) 2535 { 2536 struct dpaa_priv *priv; 2537 int err; 2538 2539 err = dpaa_stop(net_dev); 2540 2541 priv = netdev_priv(net_dev); 2542 dpaa_eth_napi_disable(priv); 2543 2544 return err; 2545 } 2546 2547 static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2548 { 2549 struct dpaa_priv *priv = netdev_priv(dev); 2550 struct hwtstamp_config config; 2551 2552 if (copy_from_user(&config, rq->ifr_data, sizeof(config))) 2553 return -EFAULT; 2554 2555 switch (config.tx_type) { 2556 case HWTSTAMP_TX_OFF: 2557 /* Couldn't disable rx/tx timestamping separately. 2558 * Do nothing here. 2559 */ 2560 priv->tx_tstamp = false; 2561 break; 2562 case HWTSTAMP_TX_ON: 2563 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); 2564 priv->tx_tstamp = true; 2565 break; 2566 default: 2567 return -ERANGE; 2568 } 2569 2570 if (config.rx_filter == HWTSTAMP_FILTER_NONE) { 2571 /* Couldn't disable rx/tx timestamping separately. 2572 * Do nothing here. 2573 */ 2574 priv->rx_tstamp = false; 2575 } else { 2576 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); 2577 priv->rx_tstamp = true; 2578 /* TS is set for all frame types, not only those requested */ 2579 config.rx_filter = HWTSTAMP_FILTER_ALL; 2580 } 2581 2582 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 2583 -EFAULT : 0; 2584 } 2585 2586 static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) 2587 { 2588 int ret = -EINVAL; 2589 2590 if (cmd == SIOCGMIIREG) { 2591 if (net_dev->phydev) 2592 return phy_mii_ioctl(net_dev->phydev, rq, cmd); 2593 } 2594 2595 if (cmd == SIOCSHWTSTAMP) 2596 return dpaa_ts_ioctl(net_dev, rq, cmd); 2597 2598 return ret; 2599 } 2600 2601 static const struct net_device_ops dpaa_ops = { 2602 .ndo_open = dpaa_open, 2603 .ndo_start_xmit = dpaa_start_xmit, 2604 .ndo_stop = dpaa_eth_stop, 2605 .ndo_tx_timeout = dpaa_tx_timeout, 2606 .ndo_get_stats64 = dpaa_get_stats64, 2607 .ndo_change_carrier = fixed_phy_change_carrier, 2608 .ndo_set_mac_address = dpaa_set_mac_address, 2609 .ndo_validate_addr = eth_validate_addr, 2610 .ndo_set_rx_mode = dpaa_set_rx_mode, 2611 .ndo_do_ioctl = dpaa_ioctl, 2612 .ndo_setup_tc = dpaa_setup_tc, 2613 }; 2614 2615 static int dpaa_napi_add(struct net_device *net_dev) 2616 { 2617 struct dpaa_priv *priv = netdev_priv(net_dev); 2618 struct dpaa_percpu_priv *percpu_priv; 2619 int cpu; 2620 2621 for_each_possible_cpu(cpu) { 2622 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); 2623 2624 netif_napi_add(net_dev, &percpu_priv->np.napi, 2625 dpaa_eth_poll, NAPI_POLL_WEIGHT); 2626 } 2627 2628 return 0; 2629 } 2630 2631 static void dpaa_napi_del(struct net_device *net_dev) 2632 { 2633 struct dpaa_priv *priv = netdev_priv(net_dev); 2634 struct dpaa_percpu_priv *percpu_priv; 2635 int cpu; 2636 2637 for_each_possible_cpu(cpu) { 2638 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); 2639 2640 netif_napi_del(&percpu_priv->np.napi); 2641 } 2642 } 2643 2644 static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp, 2645 struct bm_buffer *bmb) 2646 { 2647 dma_addr_t addr = bm_buf_addr(bmb); 2648 2649 dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE, 2650 DMA_FROM_DEVICE); 2651 2652 skb_free_frag(phys_to_virt(addr)); 2653 } 2654 2655 /* Alloc the dpaa_bp struct and configure default values */ 2656 static struct dpaa_bp *dpaa_bp_alloc(struct device *dev) 2657 { 2658 struct dpaa_bp *dpaa_bp; 2659 2660 dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL); 2661 if (!dpaa_bp) 2662 return ERR_PTR(-ENOMEM); 2663 2664 dpaa_bp->bpid = FSL_DPAA_BPID_INV; 2665 dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count); 2666 if (!dpaa_bp->percpu_count) 2667 return ERR_PTR(-ENOMEM); 2668 2669 dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT; 2670 2671 dpaa_bp->seed_cb = dpaa_bp_seed; 2672 dpaa_bp->free_buf_cb = dpaa_bp_free_pf; 2673 2674 return dpaa_bp; 2675 } 2676 2677 /* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR. 2678 * We won't be sending congestion notifications to FMan; for now, we just use 2679 * this CGR to generate enqueue rejections to FMan in order to drop the frames 2680 * before they reach our ingress queues and eat up memory. 2681 */ 2682 static int dpaa_ingress_cgr_init(struct dpaa_priv *priv) 2683 { 2684 struct qm_mcc_initcgr initcgr; 2685 u32 cs_th; 2686 int err; 2687 2688 err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid); 2689 if (err < 0) { 2690 if (netif_msg_drv(priv)) 2691 pr_err("Error %d allocating CGR ID\n", err); 2692 goto out_error; 2693 } 2694 2695 /* Enable CS TD, but disable Congestion State Change Notifications. */ 2696 memset(&initcgr, 0, sizeof(initcgr)); 2697 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES); 2698 initcgr.cgr.cscn_en = QM_CGR_EN; 2699 cs_th = DPAA_INGRESS_CS_THRESHOLD; 2700 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); 2701 2702 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN); 2703 initcgr.cgr.cstd_en = QM_CGR_EN; 2704 2705 /* This CGR will be associated with the SWP affined to the current CPU. 2706 * However, we'll place all our ingress FQs in it. 2707 */ 2708 err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT, 2709 &initcgr); 2710 if (err < 0) { 2711 if (netif_msg_drv(priv)) 2712 pr_err("Error %d creating ingress CGR with ID %d\n", 2713 err, priv->ingress_cgr.cgrid); 2714 qman_release_cgrid(priv->ingress_cgr.cgrid); 2715 goto out_error; 2716 } 2717 if (netif_msg_drv(priv)) 2718 pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n", 2719 priv->ingress_cgr.cgrid, priv->mac_dev->addr); 2720 2721 priv->use_ingress_cgr = true; 2722 2723 out_error: 2724 return err; 2725 } 2726 2727 static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) 2728 { 2729 u16 headroom; 2730 2731 /* The frame headroom must accommodate: 2732 * - the driver private data area 2733 * - parse results, hash results, timestamp if selected 2734 * If either hash results or time stamp are selected, both will 2735 * be copied to/from the frame headroom, as TS is located between PR and 2736 * HR in the IC and IC copy size has a granularity of 16bytes 2737 * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM) 2738 * 2739 * Also make sure the headroom is a multiple of data_align bytes 2740 */ 2741 headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE + 2742 DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE); 2743 2744 return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom, 2745 DPAA_FD_DATA_ALIGNMENT) : 2746 headroom; 2747 } 2748 2749 static int dpaa_eth_probe(struct platform_device *pdev) 2750 { 2751 struct net_device *net_dev = NULL; 2752 struct dpaa_bp *dpaa_bp = NULL; 2753 struct dpaa_fq *dpaa_fq, *tmp; 2754 struct dpaa_priv *priv = NULL; 2755 struct fm_port_fqs port_fqs; 2756 struct mac_device *mac_dev; 2757 int err = 0, channel; 2758 struct device *dev; 2759 2760 dev = &pdev->dev; 2761 2762 err = bman_is_probed(); 2763 if (!err) 2764 return -EPROBE_DEFER; 2765 if (err < 0) { 2766 dev_err(dev, "failing probe due to bman probe error\n"); 2767 return -ENODEV; 2768 } 2769 err = qman_is_probed(); 2770 if (!err) 2771 return -EPROBE_DEFER; 2772 if (err < 0) { 2773 dev_err(dev, "failing probe due to qman probe error\n"); 2774 return -ENODEV; 2775 } 2776 err = bman_portals_probed(); 2777 if (!err) 2778 return -EPROBE_DEFER; 2779 if (err < 0) { 2780 dev_err(dev, 2781 "failing probe due to bman portals probe error\n"); 2782 return -ENODEV; 2783 } 2784 err = qman_portals_probed(); 2785 if (!err) 2786 return -EPROBE_DEFER; 2787 if (err < 0) { 2788 dev_err(dev, 2789 "failing probe due to qman portals probe error\n"); 2790 return -ENODEV; 2791 } 2792 2793 /* Allocate this early, so we can store relevant information in 2794 * the private area 2795 */ 2796 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM); 2797 if (!net_dev) { 2798 dev_err(dev, "alloc_etherdev_mq() failed\n"); 2799 return -ENOMEM; 2800 } 2801 2802 /* Do this here, so we can be verbose early */ 2803 SET_NETDEV_DEV(net_dev, dev); 2804 dev_set_drvdata(dev, net_dev); 2805 2806 priv = netdev_priv(net_dev); 2807 priv->net_dev = net_dev; 2808 2809 priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT); 2810 2811 mac_dev = dpaa_mac_dev_get(pdev); 2812 if (IS_ERR(mac_dev)) { 2813 netdev_err(net_dev, "dpaa_mac_dev_get() failed\n"); 2814 err = PTR_ERR(mac_dev); 2815 goto free_netdev; 2816 } 2817 2818 /* Devices used for DMA mapping */ 2819 priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]); 2820 priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]); 2821 err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40)); 2822 if (!err) 2823 err = dma_coerce_mask_and_coherent(priv->tx_dma_dev, 2824 DMA_BIT_MASK(40)); 2825 if (err) { 2826 netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n"); 2827 return err; 2828 } 2829 2830 /* If fsl_fm_max_frm is set to a higher value than the all-common 1500, 2831 * we choose conservatively and let the user explicitly set a higher 2832 * MTU via ifconfig. Otherwise, the user may end up with different MTUs 2833 * in the same LAN. 2834 * If on the other hand fsl_fm_max_frm has been chosen below 1500, 2835 * start with the maximum allowed. 2836 */ 2837 net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN); 2838 2839 netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n", 2840 net_dev->mtu); 2841 2842 priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */ 2843 priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ 2844 2845 /* bp init */ 2846 dpaa_bp = dpaa_bp_alloc(dev); 2847 if (IS_ERR(dpaa_bp)) { 2848 err = PTR_ERR(dpaa_bp); 2849 goto free_dpaa_bps; 2850 } 2851 /* the raw size of the buffers used for reception */ 2852 dpaa_bp->raw_size = DPAA_BP_RAW_SIZE; 2853 /* avoid runtime computations by keeping the usable size here */ 2854 dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size); 2855 dpaa_bp->priv = priv; 2856 2857 err = dpaa_bp_alloc_pool(dpaa_bp); 2858 if (err < 0) 2859 goto free_dpaa_bps; 2860 priv->dpaa_bp = dpaa_bp; 2861 2862 INIT_LIST_HEAD(&priv->dpaa_fq_list); 2863 2864 memset(&port_fqs, 0, sizeof(port_fqs)); 2865 2866 err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs); 2867 if (err < 0) { 2868 dev_err(dev, "dpaa_alloc_all_fqs() failed\n"); 2869 goto free_dpaa_bps; 2870 } 2871 2872 priv->mac_dev = mac_dev; 2873 2874 channel = dpaa_get_channel(); 2875 if (channel < 0) { 2876 dev_err(dev, "dpaa_get_channel() failed\n"); 2877 err = channel; 2878 goto free_dpaa_bps; 2879 } 2880 2881 priv->channel = (u16)channel; 2882 2883 /* Walk the CPUs with affine portals 2884 * and add this pool channel to each's dequeue mask. 2885 */ 2886 dpaa_eth_add_channel(priv->channel, &pdev->dev); 2887 2888 dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); 2889 2890 /* Create a congestion group for this netdev, with 2891 * dynamically-allocated CGR ID. 2892 * Must be executed after probing the MAC, but before 2893 * assigning the egress FQs to the CGRs. 2894 */ 2895 err = dpaa_eth_cgr_init(priv); 2896 if (err < 0) { 2897 dev_err(dev, "Error initializing CGR\n"); 2898 goto free_dpaa_bps; 2899 } 2900 2901 err = dpaa_ingress_cgr_init(priv); 2902 if (err < 0) { 2903 dev_err(dev, "Error initializing ingress CGR\n"); 2904 goto delete_egress_cgr; 2905 } 2906 2907 /* Add the FQs to the interface, and make them active */ 2908 list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) { 2909 err = dpaa_fq_init(dpaa_fq, false); 2910 if (err < 0) 2911 goto free_dpaa_fqs; 2912 } 2913 2914 priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]); 2915 priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]); 2916 2917 /* All real interfaces need their ports initialized */ 2918 err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs, 2919 &priv->buf_layout[0], dev); 2920 if (err) 2921 goto free_dpaa_fqs; 2922 2923 /* Rx traffic distribution based on keygen hashing defaults to on */ 2924 priv->keygen_in_use = true; 2925 2926 priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); 2927 if (!priv->percpu_priv) { 2928 dev_err(dev, "devm_alloc_percpu() failed\n"); 2929 err = -ENOMEM; 2930 goto free_dpaa_fqs; 2931 } 2932 2933 priv->num_tc = 1; 2934 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); 2935 2936 /* Initialize NAPI */ 2937 err = dpaa_napi_add(net_dev); 2938 if (err < 0) 2939 goto delete_dpaa_napi; 2940 2941 err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout); 2942 if (err < 0) 2943 goto delete_dpaa_napi; 2944 2945 dpaa_eth_sysfs_init(&net_dev->dev); 2946 2947 netif_info(priv, probe, net_dev, "Probed interface %s\n", 2948 net_dev->name); 2949 2950 return 0; 2951 2952 delete_dpaa_napi: 2953 dpaa_napi_del(net_dev); 2954 free_dpaa_fqs: 2955 dpaa_fq_free(dev, &priv->dpaa_fq_list); 2956 qman_delete_cgr_safe(&priv->ingress_cgr); 2957 qman_release_cgrid(priv->ingress_cgr.cgrid); 2958 delete_egress_cgr: 2959 qman_delete_cgr_safe(&priv->cgr_data.cgr); 2960 qman_release_cgrid(priv->cgr_data.cgr.cgrid); 2961 free_dpaa_bps: 2962 dpaa_bps_free(priv); 2963 free_netdev: 2964 dev_set_drvdata(dev, NULL); 2965 free_netdev(net_dev); 2966 2967 return err; 2968 } 2969 2970 static int dpaa_remove(struct platform_device *pdev) 2971 { 2972 struct net_device *net_dev; 2973 struct dpaa_priv *priv; 2974 struct device *dev; 2975 int err; 2976 2977 dev = &pdev->dev; 2978 net_dev = dev_get_drvdata(dev); 2979 2980 priv = netdev_priv(net_dev); 2981 2982 dpaa_eth_sysfs_remove(dev); 2983 2984 dev_set_drvdata(dev, NULL); 2985 unregister_netdev(net_dev); 2986 2987 err = dpaa_fq_free(dev, &priv->dpaa_fq_list); 2988 2989 qman_delete_cgr_safe(&priv->ingress_cgr); 2990 qman_release_cgrid(priv->ingress_cgr.cgrid); 2991 qman_delete_cgr_safe(&priv->cgr_data.cgr); 2992 qman_release_cgrid(priv->cgr_data.cgr.cgrid); 2993 2994 dpaa_napi_del(net_dev); 2995 2996 dpaa_bps_free(priv); 2997 2998 free_netdev(net_dev); 2999 3000 return err; 3001 } 3002 3003 static const struct platform_device_id dpaa_devtype[] = { 3004 { 3005 .name = "dpaa-ethernet", 3006 .driver_data = 0, 3007 }, { 3008 } 3009 }; 3010 MODULE_DEVICE_TABLE(platform, dpaa_devtype); 3011 3012 static struct platform_driver dpaa_driver = { 3013 .driver = { 3014 .name = KBUILD_MODNAME, 3015 }, 3016 .id_table = dpaa_devtype, 3017 .probe = dpaa_eth_probe, 3018 .remove = dpaa_remove 3019 }; 3020 3021 static int __init dpaa_load(void) 3022 { 3023 int err; 3024 3025 pr_debug("FSL DPAA Ethernet driver\n"); 3026 3027 /* initialize dpaa_eth mirror values */ 3028 dpaa_rx_extra_headroom = fman_get_rx_extra_headroom(); 3029 dpaa_max_frm = fman_get_max_frm(); 3030 3031 err = platform_driver_register(&dpaa_driver); 3032 if (err < 0) 3033 pr_err("Error, platform_driver_register() = %d\n", err); 3034 3035 return err; 3036 } 3037 module_init(dpaa_load); 3038 3039 static void __exit dpaa_unload(void) 3040 { 3041 platform_driver_unregister(&dpaa_driver); 3042 3043 /* Only one channel is used and needs to be released after all 3044 * interfaces are removed 3045 */ 3046 dpaa_release_channel(); 3047 } 3048 module_exit(dpaa_unload); 3049 3050 MODULE_LICENSE("Dual BSD/GPL"); 3051 MODULE_DESCRIPTION("FSL DPAA Ethernet driver"); 3052