/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/io.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>

#include "fman.h"
#include "fman_port.h"
#include "mac.h"
#include "dpaa_eth.h"
/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");

static u16 tx_timeout = 1000;
module_param(tx_timeout, ushort, 0444);
MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");

#define FM_FD_STAT_RX_ERRORS						\
	(FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL	| \
	 FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
	 FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME	| \
	 FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
	 FM_FD_ERR_PRS_HDR_ERR)

#define FM_FD_STAT_TX_ERRORS \
	(FM_FD_ERR_UNSUPPORTED_FORMAT | \
	 FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)

#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
			  NETIF_MSG_LINK | NETIF_MSG_IFUP | \
			  NETIF_MSG_IFDOWN)

#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
/* Ingress congestion threshold on FMan ports
 * The size in bytes of the ingress tail-drop threshold on FMan ports.
 * Traffic piling up above this value will be rejected by QMan and discarded
 * by FMan.
 */

/* Size in bytes of the FQ taildrop threshold */
#define DPAA_FQ_TD 0x200000

#define DPAA_CS_THRESHOLD_1G 0x06000000
/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
 * The size in bytes of the egress Congestion State notification threshold on
 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
 * and the larger the frame size, the more acute the problem.
 * So we have to find a balance between these factors:
 * - avoiding the device staying congested for a prolonged time (risking
 *   the netdev watchdog to fire - see also the tx_timeout module param);
 * - affecting performance of protocols such as TCP, which otherwise
 *   behave well under the congestion notification mechanism;
 * - preventing the Tx cores from tightly-looping (as if the congestion
 *   threshold was too low to be effective);
 * - running out of memory if the CS threshold is set too high.
 */

#define DPAA_CS_THRESHOLD_10G 0x10000000
/* The size in bytes of the egress Congestion State notification threshold on
 * 10G ports, range 0x1000 .. 0x10000000
 */

/* Largest value that the FQD's OAL field can hold */
#define FSL_QMAN_MAX_OAL 127

/* Default alignment for start of data in an Rx FD */
#define DPAA_FD_DATA_ALIGNMENT  16

/* Values for the L3R field of the FM Parse Results
 */
/* L3 Type field: First IP Present IPv4 */
#define FM_L3_PARSE_RESULT_IPV4	0x8000
/* L3 Type field: First IP Present IPv6 */
#define FM_L3_PARSE_RESULT_IPV6	0x4000
/* Values for the L4R field of the FM Parse Results */
/* L4 Type field: UDP */
#define FM_L4_PARSE_RESULT_UDP	0x40
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP	0x20

/* FD status field indicating whether the FM Parser has attempted to validate
 * the L4 csum of the frame.
 * Note that having this bit set doesn't necessarily imply that the checksum
 * is valid. One would have to check the parse results to find that out.
 */
#define FM_FD_STAT_L4CV		0x00000004

#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */

#define FSL_DPAA_BPID_INV		0xff
#define FSL_DPAA_ETH_MAX_BUF_COUNT	128
#define FSL_DPAA_ETH_REFILL_THRESHOLD	80

#define DPAA_TX_PRIV_DATA_SIZE	16
#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE 8
#define DPAA_HASH_RESULTS_SIZE 8
#define DPAA_RX_PRIV_DATA_SIZE	(u16)(DPAA_TX_PRIV_DATA_SIZE + \
					dpaa_rx_extra_headroom)

#define DPAA_ETH_RX_QUEUES	128

#define DPAA_ENQUEUE_RETRIES	100000

enum port_type {RX, TX};

struct fm_port_fqs {
	struct dpaa_fq *tx_defq;
	struct dpaa_fq *tx_errq;
	struct dpaa_fq *rx_defq;
	struct dpaa_fq *rx_errq;
};

/* All the dpa bps in use at any moment */
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];

/* The raw buffer size must be cacheline aligned */
#define DPAA_BP_RAW_SIZE 4096
/* When using more than one buffer pool, the raw sizes are as follows:
 * 1 bp: 4KB
 * 2 bp: 2KB, 4KB
 * 3 bp: 1KB, 2KB, 4KB
 * 4 bp: 1KB, 2KB, 4KB, 8KB
 */
static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
{
	size_t res = DPAA_BP_RAW_SIZE / 4;
	u8 i;

	for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
		res *= 2;
	return res;
}

/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
 * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
 * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
 * half-page-aligned buffers, so we reserve some more space for start-of-buffer
 * alignment.
 */
#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)

static int dpaa_max_frm;

static int dpaa_rx_extra_headroom;

/* The MTU excludes the VLAN Ethernet header and the FCS; e.g. a maximum frame
 * size of 1522 bytes yields the standard 1500 byte MTU.
 */
#define dpaa_get_max_mtu()	\
	(dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))

static int dpaa_netdev_init(struct net_device *net_dev,
			    const struct net_device_ops *dpaa_ops,
			    u16 tx_timeout)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct dpaa_percpu_priv *percpu_priv;
	const u8 *mac_addr;
	int i, err;

	/* Although we access another CPU's private data here
	 * we do it at initialization so it is safe
	 */
	for_each_possible_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		percpu_priv->net_dev = net_dev;
	}

	net_dev->netdev_ops = dpaa_ops;
	mac_addr = priv->mac_dev->addr;

	net_dev->mem_start = priv->mac_dev->res->start;
	net_dev->mem_end = priv->mac_dev->res->end;

	net_dev->min_mtu = ETH_MIN_MTU;
	net_dev->max_mtu = dpaa_get_max_mtu();

	net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				 NETIF_F_LLTX);

	net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
	/* The kernel enables GSO automatically, if we declare NETIF_F_SG.
	 * For conformity, we'll still declare GSO explicitly.
	 */
	net_dev->features |= NETIF_F_GSO;
	net_dev->features |= NETIF_F_RXCSUM;

	net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	/* we do not want shared skbs on TX */
	net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;

	net_dev->features |= net_dev->hw_features;
	net_dev->vlan_features = net_dev->features;

	memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
	memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);

	net_dev->ethtool_ops = &dpaa_ethtool_ops;

	net_dev->needed_headroom = priv->tx_headroom;
	net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);

	/* start without the RUNNING flag, phylib controls it later */
	netif_carrier_off(net_dev);

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() = %d\n", err);
		return err;
	}

	return 0;
}

static int dpaa_stop(struct net_device *net_dev)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;
	int i, err, error;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	netif_tx_stop_all_queues(net_dev);
	/* Allow the Fman (Tx) port to process in-flight frames before we
	 * try switching it off.
	 */
	usleep_range(5000, 10000);

	err = mac_dev->stop(mac_dev);
	if (err < 0)
		netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
			  err);

	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
		error = fman_port_disable(mac_dev->port[i]);
		if (error)
			err = error;
	}

	if (net_dev->phydev)
		phy_disconnect(net_dev->phydev);
	net_dev->phydev = NULL;

	return err;
}

static void dpaa_tx_timeout(struct net_device *net_dev)
{
	struct dpaa_percpu_priv *percpu_priv;
	const struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	percpu_priv = this_cpu_ptr(priv->percpu_priv);

	netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
		   jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));

	percpu_priv->stats.tx_errors++;
}

/* Calculates the statistics for the given device by adding the statistics
 * collected by each CPU.
 */
static void dpaa_get_stats64(struct net_device *net_dev,
			     struct rtnl_link_stats64 *s)
{
	int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct dpaa_percpu_priv *percpu_priv;
	u64 *netstats = (u64 *)s;
	u64 *cpustats;
	int i, j;

	for_each_possible_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

		cpustats = (u64 *)&percpu_priv->stats;

		/* add stats from all CPUs */
		for (j = 0; j < numstats; j++)
			netstats[j] += cpustats[j];
	}
}

static int dpaa_setup_tc(struct net_device *net_dev, u32 handle,
			 u32 chain_index, __be16 proto, struct tc_to_netdev *tc)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	u8 num_tc;
	int i;

	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = tc->mqprio->num_tc;

	if (num_tc == priv->num_tc)
		return 0;

	if (!num_tc) {
		netdev_reset_tc(net_dev);
		goto out;
	}

	if (num_tc > DPAA_TC_NUM) {
		netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
			   DPAA_TC_NUM);
		return -EINVAL;
	}

	netdev_set_num_tc(net_dev, num_tc);

	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
				    i * DPAA_TC_TXQ_NUM);

out:
	priv->num_tc = num_tc ? : 1;
	netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
	return 0;
}

static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{
	struct platform_device *of_dev;
	struct dpaa_eth_data *eth_data;
	struct device *dpaa_dev, *dev;
	struct device_node *mac_node;
	struct mac_device *mac_dev;

	dpaa_dev = &pdev->dev;
	eth_data = dpaa_dev->platform_data;
	if (!eth_data)
		return ERR_PTR(-ENODEV);

	mac_node = eth_data->mac_node;

	of_dev = of_find_device_by_node(mac_node);
	if (!of_dev) {
		dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n",
			mac_node->full_name);
		of_node_put(mac_node);
		return ERR_PTR(-EINVAL);
	}
	of_node_put(mac_node);

	dev = &of_dev->dev;

	mac_dev = dev_get_drvdata(dev);
	if (!mac_dev) {
		dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n",
			dev_name(dev));
		return ERR_PTR(-EINVAL);
	}

	return mac_dev;
}

static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
{
	const struct dpaa_priv *priv;
	struct mac_device *mac_dev;
	struct sockaddr old_addr;
	int err;

	priv = netdev_priv(net_dev);

	memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);

	err = eth_mac_addr(net_dev, addr);
	if (err < 0) {
		netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
		return err;
	}

	mac_dev = priv->mac_dev;

	err = mac_dev->change_addr(mac_dev->fman_mac,
				   (enet_addr_t *)net_dev->dev_addr);
	if (err < 0) {
		netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
			  err);
		/* reverting to previous address */
		eth_mac_addr(net_dev, &old_addr);

		return err;
	}

	return 0;
}

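/* Keep the MAC's promiscuous mode and multicast filter in sync with the
 * current net_device flags and address lists.
 */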
static void dpaa_set_rx_mode(struct net_device *net_dev)
{
	const struct dpaa_priv *priv;
	int err;

	priv = netdev_priv(net_dev);

	if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
		priv->mac_dev->promisc = !priv->mac_dev->promisc;
		err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
						 priv->mac_dev->promisc);
		if (err < 0)
			netif_err(priv, drv, net_dev,
				  "mac_dev->set_promisc() = %d\n",
				  err);
	}

	err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
	if (err < 0)
		netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
			  err);
}

static struct dpaa_bp *dpaa_bpid2pool(int bpid)
{
	if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
		return NULL;

	return dpaa_bp_array[bpid];
}

/* checks if this bpool is already allocated */
static bool dpaa_bpid2pool_use(int bpid)
{
	if (dpaa_bpid2pool(bpid)) {
		atomic_inc(&dpaa_bp_array[bpid]->refs);
		return true;
	}

	return false;
}

/* called only once per bpid by dpaa_bp_alloc_pool() */
static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
{
	dpaa_bp_array[bpid] = dpaa_bp;
	atomic_set(&dpaa_bp->refs, 1);
}

static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
{
	int err;

	if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
		pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
		       __func__);
		return -EINVAL;
	}

	/* If the pool is already specified, we only create one per bpid */
	if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
	    dpaa_bpid2pool_use(dpaa_bp->bpid))
		return 0;

	if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
		dpaa_bp->pool = bman_new_pool();
		if (!dpaa_bp->pool) {
			pr_err("%s: bman_new_pool() failed\n",
			       __func__);
			return -ENODEV;
		}

		dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
	}

	if (dpaa_bp->seed_cb) {
		err = dpaa_bp->seed_cb(dpaa_bp);
		if (err)
			goto pool_seed_failed;
	}

	dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);

	return 0;

pool_seed_failed:
	pr_err("%s: pool seeding failed\n", __func__);
	bman_free_pool(dpaa_bp->pool);

	return err;
}

/* remove and free all the buffers from the given buffer pool */
static void dpaa_bp_drain(struct dpaa_bp *bp)
{
	u8 num = 8;
	int ret;

	do {
		struct bm_buffer bmb[8];
		int i;

		ret = bman_acquire(bp->pool, bmb, num);
		if (ret < 0) {
			if (num == 8) {
				/* we have less than 8 buffers left;
				 * drain them one by one
				 */
				num = 1;
				ret = 1;
				continue;
			} else {
				/* Pool is fully drained */
				break;
			}
		}

		if (bp->free_buf_cb)
			for (i = 0; i < num; i++)
				bp->free_buf_cb(bp, &bmb[i]);
	} while (ret > 0);
}

static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
{
	struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);

	/* the mapping between bpid and dpaa_bp is done very late in the
	 * allocation procedure; if something failed before the mapping, the bp
	 * was not configured, therefore we don't need the below instructions
	 */
	if (!bp)
		return;

	if (!atomic_dec_and_test(&bp->refs))
		return;

	if (bp->free_buf_cb)
		dpaa_bp_drain(bp);

	dpaa_bp_array[bp->bpid] = NULL;
	bman_free_pool(bp->pool);
}

static void dpaa_bps_free(struct dpaa_priv *priv)
{
	int i;

	for (i = 0; i < DPAA_BPS_NUM; i++)
		dpaa_bp_free(priv->dpaa_bps[i]);
}

/* Use multiple WQs for FQ assignment:
 *	- Tx Confirmation queues go to WQ1.
 *	- Rx Error and Tx Error queues go to WQ5 (giving them a better chance
 *	  to be scheduled, in case there are many more FQs in WQ6).
 *	- Rx Default goes to WQ6.
 *	- Tx queues go to different WQs depending on their priority. Equal
 *	  chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
 *	  WQ0 (highest priority).
 * This ensures that Tx-confirmed buffers are timely released. In particular,
 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
 * are greatly outnumbered by other FQs in the system, while
 * dequeue scheduling is round-robin.
 */
static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
{
	switch (fq->fq_type) {
	case FQ_TYPE_TX_CONFIRM:
	case FQ_TYPE_TX_CONF_MQ:
		fq->wq = 1;
		break;
	case FQ_TYPE_RX_ERROR:
	case FQ_TYPE_TX_ERROR:
		fq->wq = 5;
		break;
	case FQ_TYPE_RX_DEFAULT:
		fq->wq = 6;
		break;
	case FQ_TYPE_TX:
		switch (idx / DPAA_TC_TXQ_NUM) {
		case 0:
			/* Low priority (best effort) */
			fq->wq = 6;
			break;
		case 1:
			/* Medium priority */
			fq->wq = 2;
			break;
		case 2:
			/* High priority */
			fq->wq = 1;
			break;
		case 3:
			/* Very high priority */
			fq->wq = 0;
			break;
		default:
			WARN(1, "Too many TX FQs: more than %d!\n",
			     DPAA_ETH_TXQ_NUM);
		}
		break;
	default:
		WARN(1, "Invalid FQ type %d for FQID %d!\n",
		     fq->fq_type, fq->fqid);
	}
}

static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
				     u32 start, u32 count,
				     struct list_head *list,
				     enum dpaa_fq_type fq_type)
{
	struct dpaa_fq *dpaa_fq;
	int i;

	dpaa_fq = devm_kzalloc(dev, sizeof(*dpaa_fq) * count,
			       GFP_KERNEL);
	if (!dpaa_fq)
		return NULL;

	for (i = 0; i < count; i++) {
		dpaa_fq[i].fq_type = fq_type;
		dpaa_fq[i].fqid = start ? start + i : 0;
		list_add_tail(&dpaa_fq[i].list, list);
	}

	for (i = 0; i < count; i++)
		dpaa_assign_wq(dpaa_fq + i, i);

	return dpaa_fq;
}

static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
			      struct fm_port_fqs *port_fqs)
{
	struct dpaa_fq *dpaa_fq;

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
	if (!dpaa_fq)
		goto fq_alloc_failed;

	port_fqs->rx_errq = &dpaa_fq[0];

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
	if (!dpaa_fq)
		goto fq_alloc_failed;

	port_fqs->rx_defq = &dpaa_fq[0];

	if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
		goto fq_alloc_failed;

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
	if (!dpaa_fq)
		goto fq_alloc_failed;

	port_fqs->tx_errq = &dpaa_fq[0];

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
	if (!dpaa_fq)
		goto fq_alloc_failed;

	port_fqs->tx_defq = &dpaa_fq[0];

	if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
		goto fq_alloc_failed;

	return 0;

fq_alloc_failed:
	dev_err(dev, "dpaa_fq_alloc() failed\n");
	return -ENOMEM;
}

static u32 rx_pool_channel;
static DEFINE_SPINLOCK(rx_pool_channel_init);

static int dpaa_get_channel(void)
{
	spin_lock(&rx_pool_channel_init);
	if (!rx_pool_channel) {
		u32 pool;
		int ret;

		ret = qman_alloc_pool(&pool);

		if (!ret)
			rx_pool_channel = pool;
	}
	spin_unlock(&rx_pool_channel_init);
	if (!rx_pool_channel)
		return -ENOMEM;
	return rx_pool_channel;
}

static void dpaa_release_channel(void)
{
	qman_release_pool(rx_pool_channel);
}

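/* Add the pool channel to the static dequeue command of every affine QMan
 * portal, so that frames on this channel can be dequeued by whichever CPU's
 * portal gets to service them.
 */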
static void dpaa_eth_add_channel(u16 channel)
{
	u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
	const cpumask_t *cpus = qman_affine_cpus();
	struct qman_portal *portal;
	int cpu;

	for_each_cpu(cpu, cpus) {
		portal = qman_get_affine_portal(cpu);
		qman_p_static_dequeue_add(portal, pool);
	}
}

/* Congestion group state change notification callback.
 * Stops the device's egress queues while they are congested and
 * wakes them upon exiting congested state.
 * Also updates some CGR-related stats.
 */
static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
			   int congested)
{
	struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
		struct dpaa_priv, cgr_data.cgr);

	if (congested) {
		priv->cgr_data.congestion_start_jiffies = jiffies;
		netif_tx_stop_all_queues(priv->net_dev);
		priv->cgr_data.cgr_congested_count++;
	} else {
		priv->cgr_data.congested_jiffies +=
			(jiffies - priv->cgr_data.congestion_start_jiffies);
		netif_tx_wake_all_queues(priv->net_dev);
	}
}

static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
{
	struct qm_mcc_initcgr initcgr;
	u32 cs_th;
	int err;

	err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("%s: Error %d allocating CGR ID\n",
			       __func__, err);
		goto out_error;
	}
	priv->cgr_data.cgr.cb = dpaa_eth_cgscn;

	/* Enable Congestion State Change Notifications and CS taildrop */
	memset(&initcgr, 0, sizeof(initcgr));
	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
	initcgr.cgr.cscn_en = QM_CGR_EN;

	/* Set different thresholds based on the MAC speed.
	 * This may turn suboptimal if the MAC is reconfigured at a speed
	 * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
	 * In such cases, we ought to reconfigure the threshold, too.
	 */
	if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
		cs_th = DPAA_CS_THRESHOLD_10G;
	else
		cs_th = DPAA_CS_THRESHOLD_1G;
	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);

	initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
	initcgr.cgr.cstd_en = QM_CGR_EN;

	err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
			      &initcgr);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("%s: Error %d creating CGR with ID %d\n",
			       __func__, err, priv->cgr_data.cgr.cgrid);
		qman_release_cgrid(priv->cgr_data.cgr.cgrid);
		goto out_error;
	}
	if (netif_msg_drv(priv))
		pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
			 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
			 priv->cgr_data.cgr.chan);

out_error:
	return err;
}

static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
				      struct dpaa_fq *fq,
				      const struct qman_fq *template)
{
	fq->fq_base = *template;
	fq->net_dev = priv->net_dev;

	fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
	fq->channel = priv->channel;
}

static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
				     struct dpaa_fq *fq,
				     struct fman_port *port,
				     const struct qman_fq *template)
{
	fq->fq_base = *template;
	fq->net_dev = priv->net_dev;

	if (port) {
		fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
		fq->channel = (u16)fman_port_get_qman_channel_id(port);
	} else {
		fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
	}
}

static void dpaa_fq_setup(struct dpaa_priv *priv,
			  const struct dpaa_fq_cbs *fq_cbs,
			  struct fman_port *tx_port)
{
	int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu;
	const cpumask_t *affine_cpus = qman_affine_cpus();
	u16 portals[NR_CPUS];
	struct dpaa_fq *fq;

	for_each_cpu(cpu, affine_cpus)
		portals[num_portals++] = qman_affine_channel(cpu);
	if (num_portals == 0)
		dev_err(priv->net_dev->dev.parent,
			"No Qman software (affine) channels found");

	/* Initialize each FQ in the list */
	list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
		switch (fq->fq_type) {
		case FQ_TYPE_RX_DEFAULT:
			dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
			break;
		case FQ_TYPE_RX_ERROR:
			dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
			break;
		case FQ_TYPE_TX:
			dpaa_setup_egress(priv, fq, tx_port,
					  &fq_cbs->egress_ern);
			/* If we have more Tx queues than the number of cores,
			 * just ignore the extra ones.
			 */
			if (egress_cnt < DPAA_ETH_TXQ_NUM)
				priv->egress_fqs[egress_cnt++] = &fq->fq_base;
			break;
		case FQ_TYPE_TX_CONF_MQ:
			priv->conf_fqs[conf_cnt++] = &fq->fq_base;
			/* fall through */
		case FQ_TYPE_TX_CONFIRM:
			dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
			break;
		case FQ_TYPE_TX_ERROR:
			dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
			break;
		default:
			dev_warn(priv->net_dev->dev.parent,
				 "Unknown FQ type detected!\n");
			break;
		}
	}

	/* Make sure all CPUs receive a corresponding Tx queue. */
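	/* If fewer Tx FQs than DPAA_ETH_TXQ_NUM were picked up above, wrap
	 * around and reuse them so that every slot in egress_fqs points to a
	 * valid FQ.
	 */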
	while (egress_cnt < DPAA_ETH_TXQ_NUM) {
		list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
			if (fq->fq_type != FQ_TYPE_TX)
				continue;
			priv->egress_fqs[egress_cnt++] = &fq->fq_base;
			if (egress_cnt == DPAA_ETH_TXQ_NUM)
				break;
		}
	}
}

static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
				   struct qman_fq *tx_fq)
{
	int i;

	for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
		if (priv->egress_fqs[i] == tx_fq)
			return i;

	return -EINVAL;
}

static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
{
	const struct dpaa_priv *priv;
	struct qman_fq *confq = NULL;
	struct qm_mcc_initfq initfq;
	struct device *dev;
	struct qman_fq *fq;
	int queue_id;
	int err;

	priv = netdev_priv(dpaa_fq->net_dev);
	dev = dpaa_fq->net_dev->dev.parent;

	if (dpaa_fq->fqid == 0)
		dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;

	dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);

	err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
	if (err) {
		dev_err(dev, "qman_create_fq() failed\n");
		return err;
	}
	fq = &dpaa_fq->fq_base;

	if (dpaa_fq->init) {
		memset(&initfq, 0, sizeof(initfq));

		initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
		/* Note: we may get to keep an empty FQ in cache */
		initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);

		/* Try to reduce the number of portal interrupts for
		 * Tx Confirmation FQs.
		 */
		if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
			initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);

		/* FQ placement */
		initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);

		qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);

		/* Put all egress queues in a congestion group of their own.
		 * Sensu stricto, the Tx confirmation queues are Rx FQs,
		 * rather than Tx - but they nonetheless account for the
		 * memory footprint on behalf of egress traffic. We therefore
		 * place them in the netdev's CGR, along with the Tx FQs.
		 */
		if (dpaa_fq->fq_type == FQ_TYPE_TX ||
		    dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
		    dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
			initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
			initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
			/* Set a fixed overhead accounting, in an attempt to
			 * reduce the impact of fixed-size skb shells and the
			 * driver's needed headroom on system memory. This is
			 * especially the case when the egress traffic is
			 * composed of small datagrams.
			 * Unfortunately, QMan's OAL value is capped to an
			 * insufficient value, but even that is better than
			 * no overhead accounting at all.
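			 * (FSL_QMAN_MAX_OAL is only 127 bytes, less than
			 * sizeof(struct sk_buff) by itself, so the min()
			 * below effectively programs the cap.)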
			 */
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
			qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
			qm_fqd_set_oal(&initfq.fqd,
				       min(sizeof(struct sk_buff) +
					   priv->tx_headroom,
					   (size_t)FSL_QMAN_MAX_OAL));
		}

		if (td_enable) {
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
			qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
			initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
		}

		if (dpaa_fq->fq_type == FQ_TYPE_TX) {
			queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
			if (queue_id >= 0)
				confq = priv->conf_fqs[queue_id];
			if (confq) {
				initfq.we_mask |=
					cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			/* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
			 *	     A2V=1 (contextA A2 field is valid)
			 *	     A0V=1 (contextA A0 field is valid)
			 *	     B0V=1 (contextB field is valid)
			 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
			 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
			 */
				qm_fqd_context_a_set64(&initfq.fqd,
						       0x1e00000080000000ULL);
			}
		}

		/* Put all the ingress queues in our "ingress CGR". */
		if (priv->use_ingress_cgr &&
		    (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
		     dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
			initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
			initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
			/* Set a fixed overhead accounting, just like for the
			 * egress CGR.
			 */
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
			qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
			qm_fqd_set_oal(&initfq.fqd,
				       min(sizeof(struct sk_buff) +
					   priv->tx_headroom,
					   (size_t)FSL_QMAN_MAX_OAL));
		}

		/* Initialization common to all ingress queues */
		if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
						QM_FQCTRL_CTXASTASHING);
			initfq.fqd.context_a.stashing.exclusive =
				QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
				QM_STASHING_EXCL_ANNOTATION;
			qm_fqd_set_stashing(&initfq.fqd, 1, 2,
					    DIV_ROUND_UP(sizeof(struct qman_fq),
							 64));
		}

		err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
		if (err < 0) {
			dev_err(dev, "qman_init_fq(%u) = %d\n",
				qman_fq_fqid(fq), err);
			qman_destroy_fq(fq);
			return err;
		}
	}

	dpaa_fq->fqid = qman_fq_fqid(fq);

	return 0;
}

static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
{
	const struct dpaa_priv *priv;
	struct dpaa_fq *dpaa_fq;
	int err, error;

	err = 0;

	dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
	priv = netdev_priv(dpaa_fq->net_dev);

	if (dpaa_fq->init) {
		err = qman_retire_fq(fq, NULL);
		if (err < 0 && netif_msg_drv(priv))
			dev_err(dev, "qman_retire_fq(%u) = %d\n",
				qman_fq_fqid(fq), err);

		error = qman_oos_fq(fq);
		if (error < 0 && netif_msg_drv(priv)) {
			dev_err(dev, "qman_oos_fq(%u) = %d\n",
				qman_fq_fqid(fq), error);
			if (err >= 0)
				err = error;
		}
	}

	qman_destroy_fq(fq);
	list_del(&dpaa_fq->list);

	return err;
}

static int dpaa_fq_free(struct device *dev, struct list_head *list)
{
	struct dpaa_fq *dpaa_fq, *tmp;
	int err, error;

	err = 0;
	list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
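		/* Tear each FQ down in turn; remember the first error but
		 * keep going so that everything on the list gets destroyed
		 * and unlinked.
		 */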
		error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
		if (error < 0 && err >= 0)
			err = error;
	}

	return err;
}

static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
				 struct dpaa_fq *defq,
				 struct dpaa_buffer_layout *buf_layout)
{
	struct fman_buffer_prefix_content buf_prefix_content;
	struct fman_port_params params;
	int err;

	memset(&params, 0, sizeof(params));
	memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

	buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
	buf_prefix_content.pass_prs_result = true;
	buf_prefix_content.pass_hash_result = true;
	buf_prefix_content.pass_time_stamp = false;
	buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;

	params.specific_params.non_rx_params.err_fqid = errq->fqid;
	params.specific_params.non_rx_params.dflt_fqid = defq->fqid;

	err = fman_port_config(port, &params);
	if (err) {
		pr_err("%s: fman_port_config failed\n", __func__);
		return err;
	}

	err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
	if (err) {
		pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
		       __func__);
		return err;
	}

	err = fman_port_init(port);
	if (err)
		pr_err("%s: fm_port_init failed\n", __func__);

	return err;
}

static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
				 size_t count, struct dpaa_fq *errq,
				 struct dpaa_fq *defq,
				 struct dpaa_buffer_layout *buf_layout)
{
	struct fman_buffer_prefix_content buf_prefix_content;
	struct fman_port_rx_params *rx_p;
	struct fman_port_params params;
	int i, err;

	memset(&params, 0, sizeof(params));
	memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

	buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
	buf_prefix_content.pass_prs_result = true;
	buf_prefix_content.pass_hash_result = true;
	buf_prefix_content.pass_time_stamp = false;
	buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;

	rx_p = &params.specific_params.rx_params;
	rx_p->err_fqid = errq->fqid;
	rx_p->dflt_fqid = defq->fqid;

	count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
	rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
	for (i = 0; i < count; i++) {
		rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
		rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
	}

	err = fman_port_config(port, &params);
	if (err) {
		pr_err("%s: fman_port_config failed\n", __func__);
		return err;
	}

	err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
	if (err) {
		pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
		       __func__);
		return err;
	}

	err = fman_port_init(port);
	if (err)
		pr_err("%s: fm_port_init failed\n", __func__);

	return err;
}

static int dpaa_eth_init_ports(struct mac_device *mac_dev,
			       struct dpaa_bp **bps, size_t count,
			       struct fm_port_fqs *port_fqs,
			       struct dpaa_buffer_layout *buf_layout,
			       struct device *dev)
{
	struct fman_port *rxport = mac_dev->port[RX];
	struct fman_port *txport = mac_dev->port[TX];
	int err;

	err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
				    port_fqs->tx_defq, &buf_layout[TX]);
	if (err)
		return err;

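	/* The Rx port additionally needs to know which buffer pools (and
	 * buffer sizes) it may acquire buffers from.
	 */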
	err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
				    port_fqs->rx_defq, &buf_layout[RX]);

	return err;
}

static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
			     struct bm_buffer *bmb, int cnt)
{
	int err;

	err = bman_release(dpaa_bp->pool, bmb, cnt);
	/* Should never occur, address anyway to avoid leaking the buffers */
	if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
		while (cnt-- > 0)
			dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);

	return cnt;
}

static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
{
	struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
	struct dpaa_bp *dpaa_bp;
	int i = 0, j;

	memset(bmb, 0, sizeof(bmb));

	do {
		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
		if (!dpaa_bp)
			return;

		j = 0;
		do {
			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

			bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));

			j++; i++;
		} while (j < ARRAY_SIZE(bmb) &&
			 !qm_sg_entry_is_final(&sgt[i - 1]) &&
			 sgt[i - 1].bpid == sgt[i].bpid);

		dpaa_bman_release(dpaa_bp, bmb, j);
	} while (!qm_sg_entry_is_final(&sgt[i - 1]));
}

static void dpaa_fd_release(const struct net_device *net_dev,
			    const struct qm_fd *fd)
{
	struct qm_sg_entry *sgt;
	struct dpaa_bp *dpaa_bp;
	struct bm_buffer bmb;
	dma_addr_t addr;
	void *vaddr;

	bmb.data = 0;
	bm_buffer_set64(&bmb, qm_fd_addr(fd));

	dpaa_bp = dpaa_bpid2pool(fd->bpid);
	if (!dpaa_bp)
		return;

	if (qm_fd_get_format(fd) == qm_fd_sg) {
		vaddr = phys_to_virt(qm_fd_addr(fd));
		sgt = vaddr + qm_fd_get_offset(fd);

		dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
				 DMA_FROM_DEVICE);

		dpaa_release_sgt_members(sgt);

		addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
				      DMA_FROM_DEVICE);
		if (dma_mapping_error(dpaa_bp->dev, addr)) {
			dev_err(dpaa_bp->dev, "DMA mapping failed");
			return;
		}
		bm_buffer_set64(&bmb, addr);
	}

	dpaa_bman_release(dpaa_bp, &bmb, 1);
}

static void count_ern(struct dpaa_percpu_priv *percpu_priv,
		      const union qm_mr_entry *msg)
{
	switch (msg->ern.rc & QM_MR_RC_MASK) {
	case QM_MR_RC_CGR_TAILDROP:
		percpu_priv->ern_cnt.cg_tdrop++;
		break;
	case QM_MR_RC_WRED:
		percpu_priv->ern_cnt.wred++;
		break;
	case QM_MR_RC_ERROR:
		percpu_priv->ern_cnt.err_cond++;
		break;
	case QM_MR_RC_ORPWINDOW_EARLY:
		percpu_priv->ern_cnt.early_window++;
		break;
	case QM_MR_RC_ORPWINDOW_LATE:
		percpu_priv->ern_cnt.late_window++;
		break;
	case QM_MR_RC_FQ_TAILDROP:
		percpu_priv->ern_cnt.fq_tdrop++;
		break;
	case QM_MR_RC_ORPWINDOW_RETIRED:
		percpu_priv->ern_cnt.fq_retired++;
		break;
	case QM_MR_RC_ORP_ZERO:
		percpu_priv->ern_cnt.orp_zero++;
		break;
	}
}

/* Turn on HW checksum computation for this outgoing frame.
 * If the current protocol is not something we support in this regard
 * (or if the stack has already computed the SW checksum), we do nothing.
 *
 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
 * otherwise.
 *
 * Note that this function may modify the fd->cmd field and the skb data buffer
 * (the Parse Results area).
 */
static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
			       struct sk_buff *skb,
			       struct qm_fd *fd,
			       char *parse_results)
{
	struct fman_prs_result *parse_result;
	u16 ethertype = ntohs(skb->protocol);
	struct ipv6hdr *ipv6h = NULL;
	struct iphdr *iph;
	int retval = 0;
	u8 l4_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* Note: L3 csum seems to be already computed in sw, but we can't choose
	 * L4 alone from the FM configuration anyway.
	 */

	/* Fill in some fields of the Parse Results array, so the FMan
	 * can find them as if they came from the FMan Parser.
	 */
	parse_result = (struct fman_prs_result *)parse_results;

	/* If we're dealing with VLAN, get the real Ethernet type */
	if (ethertype == ETH_P_8021Q) {
		/* We can't always assume the MAC header is set correctly
		 * by the stack, so reset to beginning of skb->data
		 */
		skb_reset_mac_header(skb);
		ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
	}

	/* Fill in the relevant L3 parse result fields
	 * and read the L4 protocol type
	 */
	switch (ethertype) {
	case ETH_P_IP:
		parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
		iph = ip_hdr(skb);
		WARN_ON(!iph);
		l4_proto = iph->protocol;
		break;
	case ETH_P_IPV6:
		parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
		ipv6h = ipv6_hdr(skb);
		WARN_ON(!ipv6h);
		l4_proto = ipv6h->nexthdr;
		break;
	default:
		/* We shouldn't even be here */
		if (net_ratelimit())
			netif_alert(priv, tx_err, priv->net_dev,
				    "Can't compute HW csum for L3 proto 0x%x\n",
				    ntohs(skb->protocol));
		retval = -EIO;
		goto return_error;
	}

	/* Fill in the relevant L4 parse result fields */
	switch (l4_proto) {
	case IPPROTO_UDP:
		parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
		break;
	case IPPROTO_TCP:
		parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
		break;
	default:
		if (net_ratelimit())
			netif_alert(priv, tx_err, priv->net_dev,
				    "Can't compute HW csum for L4 proto 0x%x\n",
				    l4_proto);
		retval = -EIO;
		goto return_error;
	}

	/* At index 0 is IPOffset_1 as defined in the Parse Results */
	parse_result->ip_off[0] = (u8)skb_network_offset(skb);
	parse_result->l4_off = (u8)skb_transport_offset(skb);

	/* Enable L3 (and L4, if TCP or UDP) HW checksum. */
	fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);

	/* On P1023 and similar platforms fd->cmd interpretation could
	 * be disabled by setting CONTEXT_A bit ICMD; currently this bit
	 * is not set so we do not need to check; in the future, if/when
	 * using context_a we need to check this bit
	 */

return_error:
	return retval;
}

static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
{
	struct device *dev = dpaa_bp->dev;
	struct bm_buffer bmb[8];
	dma_addr_t addr;
	void *new_buf;
	u8 i;

	for (i = 0; i < 8; i++) {
		new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
		if (unlikely(!new_buf)) {
			dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
				dpaa_bp->raw_size);
			goto release_previous_buffs;
		}
		new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);

		addr = dma_map_single(dev, new_buf,
				      dpaa_bp->size, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, addr))) {
			dev_err(dpaa_bp->dev, "DMA map failed");
			goto release_previous_buffs;
		}

		bmb[i].data = 0;
		bm_buffer_set64(&bmb[i], addr);
	}

release_bufs:
	return dpaa_bman_release(dpaa_bp, bmb, i);

release_previous_buffs:
	WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");

	bm_buffer_set64(&bmb[i], 0);
	/* Avoid releasing a completely null buffer; bman_release() requires
	 * at least one buffer.
	 */
	if (likely(i))
		goto release_bufs;

	return 0;
}

static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
{
	int i;

	/* Give each CPU an allotment of "config_count" buffers */
	for_each_possible_cpu(i) {
		int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
		int j;

		/* Although we access another CPU's counters here
		 * we do it at boot time so it is safe
		 */
		for (j = 0; j < dpaa_bp->config_count; j += 8)
			*count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
	}
	return 0;
}

/* Add buffers/(pages) for Rx processing whenever bpool count falls below
 * REFILL_THRESHOLD.
 */
static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
{
	int count = *countptr;
	int new_bufs;

	if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
		do {
			new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
			if (unlikely(!new_bufs)) {
				/* Avoid looping forever if we've temporarily
				 * run out of memory. We'll try again at the
				 * next NAPI cycle.
				 */
				break;
			}
			count += new_bufs;
		} while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);

		*countptr = count;
		if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
			return -ENOMEM;
	}

	return 0;
}

static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
{
	struct dpaa_bp *dpaa_bp;
	int *countptr;
	int res, i;

	for (i = 0; i < DPAA_BPS_NUM; i++) {
		dpaa_bp = priv->dpaa_bps[i];
		if (!dpaa_bp)
			return -EINVAL;
		countptr = this_cpu_ptr(dpaa_bp->percpu_count);
		res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
		if (res)
			return res;
	}
	return 0;
}

/* Cleanup function for outgoing frame descriptors that were built on Tx path,
 * either contiguous frames or scatter/gather ones.
 * Skb freeing is not handled here.
 *
 * This function may be called on error paths in the Tx function, so guard
 * against cases when not all fd relevant fields were filled in.
 *
 * Return the skb backpointer, since for S/G frames the buffer containing it
 * gets freed here.
 */
static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
					  const struct qm_fd *fd)
{
	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = qm_fd_addr(fd);
	const struct qm_sg_entry *sgt;
	struct sk_buff **skbh, *skb;
	int nr_frags, i;

	skbh = (struct sk_buff **)phys_to_virt(addr);
	skb = *skbh;

	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
		nr_frags = skb_shinfo(skb)->nr_frags;
		dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
				 sizeof(struct qm_sg_entry) * (1 + nr_frags),
				 dma_dir);

		/* The sgt buffer has been allocated with netdev_alloc_frag(),
		 * it's from lowmem.
		 */
		sgt = phys_to_virt(addr + qm_fd_get_offset(fd));

		/* sgt[0] is from lowmem, was dma_map_single()-ed */
		dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
				 qm_sg_entry_get_len(&sgt[0]), dma_dir);

		/* remaining pages were mapped with skb_frag_dma_map();
		 * sgt[1..nr_frags] hold the skb fragments, so unmap them all
		 */
		for (i = 1; i <= nr_frags; i++) {
			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

			dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
				       qm_sg_entry_get_len(&sgt[i]), dma_dir);
		}

		/* Free the page frag that we allocated on Tx */
		skb_free_frag(phys_to_virt(addr));
	} else {
		dma_unmap_single(dev, addr,
				 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
	}

	return skb;
}

static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
{
	/* The parser has run and performed L4 checksum validation.
	 * We know there were no parser errors (and implicitly no
	 * L4 csum error), otherwise we wouldn't be here.
	 */
	if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
	    (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
		return CHECKSUM_UNNECESSARY;

	/* We're here because either the parser didn't run or the L4 checksum
	 * was not verified. This may include the case of a UDP frame with
	 * checksum zero or an L4 proto other than TCP/UDP
	 */
	return CHECKSUM_NONE;
}

/* Build a linear skb around the received buffer.
 * We are guaranteed there is enough room at the end of the data buffer to
 * accommodate the shared info area of the skb.
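 * (That guarantee comes from sizing the pool buffers with dpaa_bp_size(),
 * i.e. SKB_WITH_OVERHEAD(), which leaves room for struct skb_shared_info.)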
 */
static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
					const struct qm_fd *fd)
{
	ssize_t fd_off = qm_fd_get_offset(fd);
	dma_addr_t addr = qm_fd_addr(fd);
	struct dpaa_bp *dpaa_bp;
	struct sk_buff *skb;
	void *vaddr;

	vaddr = phys_to_virt(addr);
	WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

	dpaa_bp = dpaa_bpid2pool(fd->bpid);
	if (!dpaa_bp)
		goto free_buffer;

	skb = build_skb(vaddr, dpaa_bp->size +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (unlikely(!skb)) {
		WARN_ONCE(1, "Build skb failure on Rx\n");
		goto free_buffer;
	}
	WARN_ON(fd_off != priv->rx_headroom);
	skb_reserve(skb, fd_off);
	skb_put(skb, qm_fd_get_length(fd));

	skb->ip_summed = rx_csum_offload(priv, fd);

	return skb;

free_buffer:
	skb_free_frag(vaddr);
	return NULL;
}

/* Build an skb with the data of the first S/G entry in the linear portion and
 * the rest of the frame as skb fragments.
 *
 * The page fragment holding the S/G Table is recycled here.
 */
static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
				    const struct qm_fd *fd)
{
	ssize_t fd_off = qm_fd_get_offset(fd);
	dma_addr_t addr = qm_fd_addr(fd);
	const struct qm_sg_entry *sgt;
	struct page *page, *head_page;
	struct dpaa_bp *dpaa_bp;
	void *vaddr, *sg_vaddr;
	int frag_off, frag_len;
	struct sk_buff *skb;
	dma_addr_t sg_addr;
	int page_offset;
	unsigned int sz;
	int *count_ptr;
	int i;

	vaddr = phys_to_virt(addr);
	WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

	/* Iterate through the SGT entries and add data buffers to the skb */
	sgt = vaddr + fd_off;
	for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
		/* Extension bit is not supported */
		WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

		sg_addr = qm_sg_addr(&sgt[i]);
		sg_vaddr = phys_to_virt(sg_addr);
		WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
				    SMP_CACHE_BYTES));

		/* We may use multiple Rx pools */
		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
		if (!dpaa_bp)
			goto free_buffers;

		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
		dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
				 DMA_FROM_DEVICE);
		if (i == 0) {
			sz = dpaa_bp->size +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			skb = build_skb(sg_vaddr, sz);
			if (WARN_ON(unlikely(!skb)))
				goto free_buffers;

			skb->ip_summed = rx_csum_offload(priv, fd);

			/* Make sure forwarded skbs will have enough space
			 * on Tx, if extra headers are added.
			 */
			WARN_ON(fd_off != priv->rx_headroom);
			skb_reserve(skb, fd_off);
			skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
		} else {
			/* Not the first S/G entry; all data from buffer will
			 * be added in an skb fragment; fragment index is offset
			 * by one since first S/G entry was incorporated in the
			 * linear part of the skb.
			 *
			 * Caution: 'page' may be a tail page.
			 */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Compute offset in (possibly tail) page */
			page_offset = ((unsigned long)sg_vaddr &
					(PAGE_SIZE - 1)) +
				(page_address(page) - page_address(head_page));
			/* page_offset only refers to the beginning of sgt[i];
			 * but the buffer itself may have an internal offset.
			 */
			frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
			frag_len = qm_sg_entry_get_len(&sgt[i]);
			/* skb_add_rx_frag() does no checking on the page; if
			 * we pass it a tail page, we'll end up with
			 * bad page accounting and eventually with segfaults.
			 */
			skb_add_rx_frag(skb, i - 1, head_page, frag_off,
					frag_len, dpaa_bp->size);
		}
		/* Update the pool count for the current {cpu x bpool} */
		(*count_ptr)--;

		if (qm_sg_entry_is_final(&sgt[i]))
			break;
	}
	WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");

	/* free the SG table buffer */
	skb_free_frag(vaddr);

	return skb;

free_buffers:
	/* compensate sw bpool counter changes */
	for (i--; i >= 0; i--) {
		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
		if (dpaa_bp) {
			count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
			(*count_ptr)++;
		}
	}
	/* free all the SG entries */
	for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
		sg_addr = qm_sg_addr(&sgt[i]);
		sg_vaddr = phys_to_virt(sg_addr);
		skb_free_frag(sg_vaddr);
		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
		if (dpaa_bp) {
			count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
			(*count_ptr)--;
		}

		if (qm_sg_entry_is_final(&sgt[i]))
			break;
	}
	/* free the SGT fragment */
	skb_free_frag(vaddr);

	return NULL;
}

static int skb_to_contig_fd(struct dpaa_priv *priv,
			    struct sk_buff *skb, struct qm_fd *fd,
			    int *offset)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	enum dma_data_direction dma_dir;
	unsigned char *buffer_start;
	struct sk_buff **skbh;
	dma_addr_t addr;
	int err;

	/* We are guaranteed to have at least tx_headroom bytes
	 * available, so just use that for offset.
	 */
	fd->bpid = FSL_DPAA_BPID_INV;
	buffer_start = skb->data - priv->tx_headroom;
	dma_dir = DMA_TO_DEVICE;

	skbh = (struct sk_buff **)buffer_start;
	*skbh = skb;

	/* Enable L3/L4 hardware checksum computation.
	 *
	 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
	 * need to write into the skb.
	 */
	err = dpaa_enable_tx_csum(priv, skb, fd,
				  ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
	if (unlikely(err < 0)) {
		if (net_ratelimit())
			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
				  err);
		return err;
	}

	/* Fill in the rest of the FD fields */
	qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
	fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);

	/* Map the entire buffer size that may be seen by FMan, but no more */
	addr = dma_map_single(dev, skbh,
			      skb_tail_pointer(skb) - buffer_start, dma_dir);
	if (unlikely(dma_mapping_error(dev, addr))) {
		if (net_ratelimit())
			netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
		return -EINVAL;
	}
	qm_fd_addr_set64(fd, addr);

	return 0;
}

static int skb_to_sg_fd(struct dpaa_priv *priv,
			struct sk_buff *skb, struct qm_fd *fd)
{
	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
	const int nr_frags = skb_shinfo(skb)->nr_frags;
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	struct qm_sg_entry *sgt;
	struct sk_buff **skbh;
	int i, j, err, sz;
	void *buffer_start;
	skb_frag_t *frag;
	dma_addr_t addr;
	size_t frag_len;
	void *sgt_buf;

	/* get a page frag to store the SGTable */
	sz = SKB_DATA_ALIGN(priv->tx_headroom +
		sizeof(struct qm_sg_entry) * (1 + nr_frags));
	sgt_buf = netdev_alloc_frag(sz);
	if (unlikely(!sgt_buf)) {
		netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
			   sz);
		return -ENOMEM;
	}

	/* Enable L3/L4 hardware checksum computation.
	 *
	 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
	 * need to write into the skb.
	 */
	err = dpaa_enable_tx_csum(priv, skb, fd,
				  sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
	if (unlikely(err < 0)) {
		if (net_ratelimit())
			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
				  err);
		goto csum_failed;
	}

	sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
	qm_sg_entry_set_len(&sgt[0], skb_headlen(skb));
	sgt[0].bpid = FSL_DPAA_BPID_INV;
	sgt[0].offset = 0;
	addr = dma_map_single(dev, skb->data,
			      skb_headlen(skb), dma_dir);
	if (unlikely(dma_mapping_error(dev, addr))) {
		dev_err(dev, "DMA mapping failed");
		err = -EINVAL;
		goto sg0_map_failed;
	}
	qm_sg_entry_set64(&sgt[0], addr);

	/* populate the rest of SGT entries */
	frag = &skb_shinfo(skb)->frags[0];
	frag_len = frag->size;
	for (i = 1; i <= nr_frags; i++, frag++) {
		WARN_ON(!skb_frag_page(frag));
		addr = skb_frag_dma_map(dev, frag, 0,
					frag_len, dma_dir);
		if (unlikely(dma_mapping_error(dev, addr))) {
			dev_err(dev, "DMA mapping failed");
			err = -EINVAL;
			goto sg_map_failed;
		}

		qm_sg_entry_set_len(&sgt[i], frag_len);
		sgt[i].bpid = FSL_DPAA_BPID_INV;
		sgt[i].offset = 0;

		/* keep the offset in the address */
		qm_sg_entry_set64(&sgt[i], addr);
		frag_len = frag->size;
	}
	qm_sg_entry_set_f(&sgt[i - 1], frag_len);

	qm_fd_set_sg(fd, priv->tx_headroom, skb->len);

	/* DMA map the SGT page */
	buffer_start = (void *)sgt - priv->tx_headroom;
	skbh = (struct sk_buff **)buffer_start;
	*skbh = skb;

	addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
			      sizeof(struct qm_sg_entry) * (1 + nr_frags),
			      dma_dir);
	if (unlikely(dma_mapping_error(dev, addr))) {
		dev_err(dev, "DMA mapping failed");
		err = -EINVAL;
		goto sgt_map_failed;
	}

	fd->bpid = FSL_DPAA_BPID_INV;
	fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
	qm_fd_addr_set64(fd, addr);

	return 0;

sgt_map_failed:
sg_map_failed:
	for (j = 0; j < i; j++)
		dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
			       qm_sg_entry_get_len(&sgt[j]), dma_dir);
sg0_map_failed:
csum_failed:
	skb_free_frag(sgt_buf);

	return err;
}

static inline int dpaa_xmit(struct dpaa_priv *priv,
			    struct rtnl_link_stats64 *percpu_stats,
			    int queue,
			    struct qm_fd *fd)
{
	struct qman_fq *egress_fq;
	int err, i;

	egress_fq = priv->egress_fqs[queue];
	if (fd->bpid == FSL_DPAA_BPID_INV)
		fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));

	/* Trace this Tx fd */
	trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);

	for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
		err = qman_enqueue(egress_fq, fd);
		if (err != -EBUSY)
			break;
	}

	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		percpu_stats->tx_fifo_errors++;
		return err;
	}

	percpu_stats->tx_packets++;
	percpu_stats->tx_bytes += qm_fd_get_length(fd);

	return 0;
}

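/* ndo_start_xmit: build either a contiguous or a scatter/gather frame
 * descriptor around the skb and enqueue it on the egress FQ selected by the
 * skb's queue mapping. On success the skb is freed later, on the Tx
 * confirmation path; on error it is dropped and counted here.
 */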
static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
	const int queue_mapping = skb_get_queue_mapping(skb);
	bool nonlinear = skb_is_nonlinear(skb);
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa_percpu_priv *percpu_priv;
	struct dpaa_priv *priv;
	struct qm_fd fd;
	int offset = 0;
	int err = 0;

	priv = netdev_priv(net_dev);
	percpu_priv = this_cpu_ptr(priv->percpu_priv);
	percpu_stats = &percpu_priv->stats;

	qm_fd_clear_fd(&fd);

	if (!nonlinear) {
		/* We're going to store the skb backpointer at the beginning
		 * of the data buffer, so we need a privately owned skb.
		 *
		 * We've made sure the skb is not shared in dev->priv_flags;
		 * we still need to verify that the skb head is not cloned.
		 */
		if (skb_cow_head(skb, priv->tx_headroom))
			goto enomem;

		WARN_ON(skb_is_nonlinear(skb));
	}

	/* MAX_SKB_FRAGS is equal to or larger than DPAA_SGT_MAX_ENTRIES;
	 * make sure we don't feed FMan with more fragments than it supports.
	 */
	if (nonlinear &&
	    likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) {
		/* Just create a S/G fd based on the skb */
		err = skb_to_sg_fd(priv, skb, &fd);
		percpu_priv->tx_frag_skbuffs++;
	} else {
		/* If the egress skb contains more fragments than we support
		 * we have no choice but to linearize it ourselves.
		 */
		if (unlikely(nonlinear) && __skb_linearize(skb))
			goto enomem;

		/* Finally, create a contig FD from this skb */
		err = skb_to_contig_fd(priv, skb, &fd, &offset);
	}
	if (unlikely(err < 0))
		goto skb_to_fd_failed;

	if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
		return NETDEV_TX_OK;

	dpaa_cleanup_tx_fd(priv, &fd);
skb_to_fd_failed:
enomem:
	percpu_stats->tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void dpaa_rx_error(struct net_device *net_dev,
			  const struct dpaa_priv *priv,
			  struct dpaa_percpu_priv *percpu_priv,
			  const struct qm_fd *fd,
			  u32 fqid)
{
	if (net_ratelimit())
		netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
			  be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);

	percpu_priv->stats.rx_errors++;

	if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
		percpu_priv->rx_errors.dme++;
	if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
		percpu_priv->rx_errors.fpe++;
	if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
		percpu_priv->rx_errors.fse++;
	if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
		percpu_priv->rx_errors.phe++;

	dpaa_fd_release(net_dev, fd);
}

static void dpaa_tx_error(struct net_device *net_dev,
			  const struct dpaa_priv *priv,
			  struct dpaa_percpu_priv *percpu_priv,
			  const struct qm_fd *fd,
			  u32 fqid)
{
	struct sk_buff *skb;

	if (net_ratelimit())
		netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
			   be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);

	percpu_priv->stats.tx_errors++;

	skb = dpaa_cleanup_tx_fd(priv, fd);
	dev_kfree_skb(skb);
}

static int dpaa_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa_napi_portal *np =
			container_of(napi, struct dpaa_napi_portal, napi);

	int cleaned = qman_p_poll_dqrr(np->p, budget);

	if (cleaned < budget) {
		napi_complete_done(napi, cleaned);
		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);

	} else if (np->down) {
		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
	}

	return cleaned;
}
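
/* Informal summary of the NAPI flow: dpaa_eth_napi_schedule() (below) masks
 * the portal's DQRI interrupt source and schedules the per-CPU NAPI instance
 * whenever a dequeue callback runs outside softirq context (e.g. in hard
 * IRQ); dpaa_eth_poll() (above) then drains up to 'budget' DQRR entries via
 * qman_p_poll_dqrr() and re-enables DQRI once it processes fewer entries than
 * the budget, or when the interface is going down.
 */
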
static void dpaa_tx_conf(struct net_device *net_dev,
			 const struct dpaa_priv *priv,
			 struct dpaa_percpu_priv *percpu_priv,
			 const struct qm_fd *fd,
			 u32 fqid)
{
	struct sk_buff *skb;

	if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
		if (net_ratelimit())
			netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
				   be32_to_cpu(fd->status) &
				   FM_FD_STAT_TX_ERRORS);

		percpu_priv->stats.tx_errors++;
	}

	percpu_priv->tx_confirm++;

	skb = dpaa_cleanup_tx_fd(priv, fd);

	consume_skb(skb);
}

static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
					 struct qman_portal *portal)
{
	if (unlikely(in_irq() || !in_serving_softirq())) {
		/* Disable QMan IRQ and invoke NAPI */
		qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);

		percpu_priv->np.p = portal;
		napi_schedule(&percpu_priv->np.napi);
		percpu_priv->in_interrupt++;
		return 1;
	}
	return 0;
}

static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
					      struct qman_fq *fq,
					      const struct qm_dqrr_entry *dq)
{
	struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
	struct dpaa_percpu_priv *percpu_priv;
	struct net_device *net_dev;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;

	net_dev = dpaa_fq->net_dev;
	priv = netdev_priv(net_dev);
	dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
	if (!dpaa_bp)
		return qman_cb_dqrr_consume;

	percpu_priv = this_cpu_ptr(priv->percpu_priv);

	if (dpaa_eth_napi_schedule(percpu_priv, portal))
		return qman_cb_dqrr_stop;

	if (dpaa_eth_refill_bpools(priv))
		/* Unable to refill the buffer pool due to insufficient
		 * system memory. Just release the frame back into the pool,
		 * otherwise we'll soon end up with an empty buffer pool.
		 */
		dpaa_fd_release(net_dev, &dq->fd);
	else
		dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

	return qman_cb_dqrr_consume;
}
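
/* Rx fast-path note (informal): each FD carries the id (bpid) of the buffer
 * pool its buffer came from; dpaa_bpid2pool() maps that id back to our
 * dpaa_bp descriptor so the buffer can be unmapped and accounted for. The
 * pool's per-CPU percpu_count is decremented once a buffer is consumed on the
 * Rx path below, and dpaa_eth_refill_bpools() tops the pool back up.
 */
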
static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
						struct qman_fq *fq,
						const struct qm_dqrr_entry *dq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa_percpu_priv *percpu_priv;
	const struct qm_fd *fd = &dq->fd;
	dma_addr_t addr = qm_fd_addr(fd);
	enum qm_fd_format fd_format;
	struct net_device *net_dev;
	u32 fd_status;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	unsigned int skb_len;
	struct sk_buff *skb;
	int *count_ptr;

	fd_status = be32_to_cpu(fd->status);
	fd_format = qm_fd_get_format(fd);
	net_dev = ((struct dpaa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);
	dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
	if (!dpaa_bp)
		return qman_cb_dqrr_consume;

	/* Trace the Rx fd */
	trace_dpaa_rx_fd(net_dev, fq, &dq->fd);

	percpu_priv = this_cpu_ptr(priv->percpu_priv);
	percpu_stats = &percpu_priv->stats;

	if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
		return qman_cb_dqrr_stop;

	/* Make sure we didn't run out of buffers */
	if (unlikely(dpaa_eth_refill_bpools(priv))) {
		/* Unable to refill the buffer pool due to insufficient
		 * system memory. Just release the frame back into the pool,
		 * otherwise we'll soon end up with an empty buffer pool.
		 */
		dpaa_fd_release(net_dev, &dq->fd);
		return qman_cb_dqrr_consume;
	}

	if (unlikely((fd_status & FM_FD_STAT_RX_ERRORS) != 0)) {
		if (net_ratelimit())
			netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
				   fd_status & FM_FD_STAT_RX_ERRORS);

		percpu_stats->rx_errors++;
		dpaa_fd_release(net_dev, fd);
		return qman_cb_dqrr_consume;
	}

	dpaa_bp = dpaa_bpid2pool(fd->bpid);
	if (!dpaa_bp)
		return qman_cb_dqrr_consume;

	dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);

	/* prefetch the first 64 bytes of the frame or the SGT start */
	prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd));

	fd_format = qm_fd_get_format(fd);
	/* The only FD types that we may receive are contig and S/G */
	WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));

	/* Account for either the contig buffer or the SGT buffer (depending on
	 * which case we were in) having been removed from the pool.
	 */
	count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
	(*count_ptr)--;

	if (likely(fd_format == qm_fd_contig))
		skb = contig_fd_to_skb(priv, fd);
	else
		skb = sg_fd_to_skb(priv, fd);
	if (!skb)
		return qman_cb_dqrr_consume;

	skb->protocol = eth_type_trans(skb, net_dev);

	skb_len = skb->len;

	if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
		return qman_cb_dqrr_consume;

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += skb_len;

	return qman_cb_dqrr_consume;
}

static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
						struct qman_fq *fq,
						const struct qm_dqrr_entry *dq)
{
	struct dpaa_percpu_priv *percpu_priv;
	struct net_device *net_dev;
	struct dpaa_priv *priv;

	net_dev = ((struct dpaa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);

	percpu_priv = this_cpu_ptr(priv->percpu_priv);

	if (dpaa_eth_napi_schedule(percpu_priv, portal))
		return qman_cb_dqrr_stop;

	dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

	return qman_cb_dqrr_consume;
}

static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
					       struct qman_fq *fq,
					       const struct qm_dqrr_entry *dq)
{
	struct dpaa_percpu_priv *percpu_priv;
	struct net_device *net_dev;
	struct dpaa_priv *priv;

	net_dev = ((struct dpaa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);

	/* Trace the fd */
	trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);

	percpu_priv = this_cpu_ptr(priv->percpu_priv);

	if (dpaa_eth_napi_schedule(percpu_priv, portal))
		return qman_cb_dqrr_stop;

	dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

	return qman_cb_dqrr_consume;
}

static void egress_ern(struct qman_portal *portal,
		       struct qman_fq *fq,
		       const union qm_mr_entry *msg)
{
	const struct qm_fd *fd = &msg->ern.fd;
	struct dpaa_percpu_priv *percpu_priv;
	const struct dpaa_priv *priv;
	struct net_device *net_dev;
	struct sk_buff *skb;

	net_dev = ((struct dpaa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);
	percpu_priv = this_cpu_ptr(priv->percpu_priv);

	percpu_priv->stats.tx_dropped++;
	percpu_priv->stats.tx_fifo_errors++;
	count_ern(percpu_priv, msg);

	skb = dpaa_cleanup_tx_fd(priv, fd);
	dev_kfree_skb_any(skb);
}
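
/* The callbacks above are wired to their frame queues through the table
 * below: Rx default/error FQs use the rx_*_dqrr handlers, Tx confirmation
 * and Tx error FQs use the conf_*_dqrr handlers, and enqueue rejections on
 * the egress FQs are delivered to egress_ern().
 */
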
static const struct dpaa_fq_cbs dpaa_fq_cbs = {
	.rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
	.tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
	.rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
	.tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
	.egress_ern = { .cb = { .ern = egress_ern } }
};

static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
{
	struct dpaa_percpu_priv *percpu_priv;
	int i;

	for_each_possible_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

		percpu_priv->np.down = 0;
		napi_enable(&percpu_priv->np.napi);
	}
}

static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
{
	struct dpaa_percpu_priv *percpu_priv;
	int i;

	for_each_possible_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

		percpu_priv->np.down = 1;
		napi_disable(&percpu_priv->np.napi);
	}
}

static int dpaa_open(struct net_device *net_dev)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;
	int err, i;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;
	dpaa_eth_napi_enable(priv);

	net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
	if (!net_dev->phydev) {
		netif_err(priv, ifup, net_dev, "init_phy() failed\n");
		err = -ENODEV;
		goto phy_init_failed;
	}

	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
		err = fman_port_enable(mac_dev->port[i]);
		if (err)
			goto mac_start_failed;
	}

	err = priv->mac_dev->start(mac_dev);
	if (err < 0) {
		netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
		goto mac_start_failed;
	}

	netif_tx_start_all_queues(net_dev);

	return 0;

mac_start_failed:
	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
		fman_port_disable(mac_dev->port[i]);

phy_init_failed:
	dpaa_eth_napi_disable(priv);

	return err;
}

static int dpaa_eth_stop(struct net_device *net_dev)
{
	struct dpaa_priv *priv;
	int err;

	err = dpaa_stop(net_dev);

	priv = netdev_priv(net_dev);
	dpaa_eth_napi_disable(priv);

	return err;
}

static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
	if (!net_dev->phydev)
		return -EINVAL;
	return phy_mii_ioctl(net_dev->phydev, rq, cmd);
}

static const struct net_device_ops dpaa_ops = {
	.ndo_open = dpaa_open,
	.ndo_start_xmit = dpaa_start_xmit,
	.ndo_stop = dpaa_eth_stop,
	.ndo_tx_timeout = dpaa_tx_timeout,
	.ndo_get_stats64 = dpaa_get_stats64,
	.ndo_set_mac_address = dpaa_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = dpaa_set_rx_mode,
	.ndo_do_ioctl = dpaa_ioctl,
	.ndo_setup_tc = dpaa_setup_tc,
};

static int dpaa_napi_add(struct net_device *net_dev)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct dpaa_percpu_priv *percpu_priv;
	int cpu;

	for_each_possible_cpu(cpu) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);

		netif_napi_add(net_dev, &percpu_priv->np.napi,
			       dpaa_eth_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}
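
/* Design note (informal): one NAPI instance is registered per possible CPU
 * rather than per device queue, presumably because each CPU owns an affine
 * QMan portal; the instance added here is the one scheduled from
 * dpaa_eth_napi_schedule() when that CPU's portal raises a dequeue interrupt.
 */
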
static void dpaa_napi_del(struct net_device *net_dev)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct dpaa_percpu_priv *percpu_priv;
	int cpu;

	for_each_possible_cpu(cpu) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);

		netif_napi_del(&percpu_priv->np.napi);
	}
}

static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
				   struct bm_buffer *bmb)
{
	dma_addr_t addr = bm_buf_addr(bmb);

	dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);

	skb_free_frag(phys_to_virt(addr));
}

/* Alloc the dpaa_bp struct and configure default values */
static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
{
	struct dpaa_bp *dpaa_bp;

	dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
	if (!dpaa_bp)
		return ERR_PTR(-ENOMEM);

	dpaa_bp->bpid = FSL_DPAA_BPID_INV;
	dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
	dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;

	dpaa_bp->seed_cb = dpaa_bp_seed;
	dpaa_bp->free_buf_cb = dpaa_bp_free_pf;

	return dpaa_bp;
}
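
/* Usage sketch (see dpaa_eth_probe() below): the caller fills in raw_size,
 * size and dev on the returned dpaa_bp and then calls dpaa_bp_alloc_pool(),
 * which is expected to replace the FSL_DPAA_BPID_INV placeholder with a real
 * BMan pool id. config_count starts at FSL_DPAA_ETH_MAX_BUF_COUNT, and the
 * per-CPU percpu_count appears to track how many buffers each CPU has seeded
 * into or consumed from the pool.
 */
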
/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
 * We won't be sending congestion notifications to FMan; for now, we just use
 * this CGR to generate enqueue rejections to FMan in order to drop the frames
 * before they reach our ingress queues and eat up memory.
 */
static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
{
	struct qm_mcc_initcgr initcgr;
	u32 cs_th;
	int err;

	err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("Error %d allocating CGR ID\n", err);
		goto out_error;
	}

	/* Enable CS TD, but disable Congestion State Change Notifications. */
	memset(&initcgr, 0, sizeof(initcgr));
	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
	initcgr.cgr.cscn_en = QM_CGR_EN;
	cs_th = DPAA_INGRESS_CS_THRESHOLD;
	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);

	initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
	initcgr.cgr.cstd_en = QM_CGR_EN;

	/* This CGR will be associated with the SWP affined to the current CPU.
	 * However, we'll place all our ingress FQs in it.
	 */
	err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
			      &initcgr);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("Error %d creating ingress CGR with ID %d\n",
			       err, priv->ingress_cgr.cgrid);
		qman_release_cgrid(priv->ingress_cgr.cgrid);
		goto out_error;
	}
	if (netif_msg_drv(priv))
		pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
			 priv->ingress_cgr.cgrid, priv->mac_dev->addr);

	priv->use_ingress_cgr = true;

out_error:
	return err;
}

static const struct of_device_id dpaa_match[];

static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
{
	u16 headroom;

	/* The frame headroom must accommodate:
	 * - the driver private data area
	 * - parse results, hash results, timestamp if selected
	 * If either hash results or timestamp are selected, both will be
	 * copied to/from the frame headroom, as TS is located between PR and
	 * HR in the IC and the IC copy size has a granularity of 16 bytes
	 * (see the description of the FMBM_RICP and FMBM_TICP registers in
	 * the DPAARM).
	 *
	 * Also make sure the headroom is a multiple of data_align bytes.
	 */
	headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
			 DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);

	return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
					      DPAA_FD_DATA_ALIGNMENT) :
					headroom;
}
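
/* Worked example (values are illustrative, not normative): with a Tx private
 * data area of 16 bytes and parse results + timestamp + hash results adding
 * up to 48 bytes, the raw headroom is 64 bytes, which is already a multiple
 * of DPAA_FD_DATA_ALIGNMENT (16) and is returned unchanged; a raw value of,
 * say, 72 bytes would instead be rounded up to 80 by ALIGN().
 */
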
static int dpaa_eth_probe(struct platform_device *pdev)
{
	struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
	struct dpaa_percpu_priv *percpu_priv;
	struct net_device *net_dev = NULL;
	struct dpaa_fq *dpaa_fq, *tmp;
	struct dpaa_priv *priv = NULL;
	struct fm_port_fqs port_fqs;
	struct mac_device *mac_dev;
	int err = 0, i, channel;
	struct device *dev;

	dev = &pdev->dev;

	/* Allocate this early, so we can store relevant information in
	 * the private area
	 */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		err = -ENOMEM;
		goto alloc_etherdev_mq_failed;
	}

	/* Do this here, so we can be verbose early */
	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);

	mac_dev = dpaa_mac_dev_get(pdev);
	if (IS_ERR(mac_dev)) {
		dev_err(dev, "dpaa_mac_dev_get() failed\n");
		err = PTR_ERR(mac_dev);
		goto mac_probe_failed;
	}

	/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
	 * we choose conservatively and let the user explicitly set a higher
	 * MTU via ifconfig. Otherwise, the user may end up with different MTUs
	 * in the same LAN.
	 * If on the other hand fsl_fm_max_frm has been chosen below 1500,
	 * start with the maximum allowed.
	 */
	net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);

	netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
		   net_dev->mtu);

	priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
	priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */

	/* device used for DMA mapping */
	set_dma_ops(dev, get_dma_ops(&pdev->dev));
	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (err) {
		dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
		goto dev_mask_failed;
	}

	/* bp init */
	for (i = 0; i < DPAA_BPS_NUM; i++) {
		int err;

		dpaa_bps[i] = dpaa_bp_alloc(dev);
		if (IS_ERR(dpaa_bps[i]))
			return PTR_ERR(dpaa_bps[i]);
		/* the raw size of the buffers used for reception */
		dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
		/* avoid runtime computations by keeping the usable size here */
		dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
		dpaa_bps[i]->dev = dev;

		err = dpaa_bp_alloc_pool(dpaa_bps[i]);
		if (err < 0) {
			dpaa_bps_free(priv);
			priv->dpaa_bps[i] = NULL;
			goto bp_create_failed;
		}
		priv->dpaa_bps[i] = dpaa_bps[i];
	}

	INIT_LIST_HEAD(&priv->dpaa_fq_list);

	memset(&port_fqs, 0, sizeof(port_fqs));

	err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
	if (err < 0) {
		dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
		goto fq_probe_failed;
	}

	priv->mac_dev = mac_dev;

	channel = dpaa_get_channel();
	if (channel < 0) {
		dev_err(dev, "dpaa_get_channel() failed\n");
		err = channel;
		goto get_channel_failed;
	}

	priv->channel = (u16)channel;

	/* Start a thread that will walk the CPUs with affine portals
	 * and add this pool channel to each's dequeue mask.
	 */
	dpaa_eth_add_channel(priv->channel);
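
	/* Note: the Rx FQs of this interface are scheduled to the pool channel
	 * allocated above rather than to a single CPU's dedicated channel;
	 * adding the pool channel to every affine portal's dequeue mask is
	 * what lets whichever CPU is available dequeue Rx frames and feed its
	 * own NAPI instance.
	 */
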
	dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);

	/* Create a congestion group for this netdev, with
	 * dynamically-allocated CGR ID.
	 * Must be executed after probing the MAC, but before
	 * assigning the egress FQs to the CGRs.
	 */
	err = dpaa_eth_cgr_init(priv);
	if (err < 0) {
		dev_err(dev, "Error initializing CGR\n");
		goto tx_cgr_init_failed;
	}

	err = dpaa_ingress_cgr_init(priv);
	if (err < 0) {
		dev_err(dev, "Error initializing ingress CGR\n");
		goto rx_cgr_init_failed;
	}

	/* Add the FQs to the interface, and make them active */
	list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
		err = dpaa_fq_init(dpaa_fq, false);
		if (err < 0)
			goto fq_alloc_failed;
	}

	priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
	priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);

	/* All real interfaces need their ports initialized */
	err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
				  &priv->buf_layout[0], dev);
	if (err)
		goto init_ports_failed;

	priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
	if (!priv->percpu_priv) {
		dev_err(dev, "devm_alloc_percpu() failed\n");
		err = -ENOMEM;
		goto alloc_percpu_failed;
	}
	for_each_possible_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		memset(percpu_priv, 0, sizeof(*percpu_priv));
	}

	priv->num_tc = 1;
	netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);

	/* Initialize NAPI */
	err = dpaa_napi_add(net_dev);
	if (err < 0)
		goto napi_add_failed;

	err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
	if (err < 0)
		goto netdev_init_failed;

	dpaa_eth_sysfs_init(&net_dev->dev);

	netif_info(priv, probe, net_dev, "Probed interface %s\n",
		   net_dev->name);

	return 0;

netdev_init_failed:
napi_add_failed:
	dpaa_napi_del(net_dev);
alloc_percpu_failed:
init_ports_failed:
	dpaa_fq_free(dev, &priv->dpaa_fq_list);
fq_alloc_failed:
	qman_delete_cgr_safe(&priv->ingress_cgr);
	qman_release_cgrid(priv->ingress_cgr.cgrid);
rx_cgr_init_failed:
	qman_delete_cgr_safe(&priv->cgr_data.cgr);
	qman_release_cgrid(priv->cgr_data.cgr.cgrid);
tx_cgr_init_failed:
get_channel_failed:
	dpaa_bps_free(priv);
bp_create_failed:
fq_probe_failed:
dev_mask_failed:
mac_probe_failed:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);
alloc_etherdev_mq_failed:
	for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) {
		if (atomic_read(&dpaa_bps[i]->refs) == 0)
			devm_kfree(dev, dpaa_bps[i]);
	}
	return err;
}

static int dpaa_remove(struct platform_device *pdev)
{
	struct net_device *net_dev;
	struct dpaa_priv *priv;
	struct device *dev;
	int err;

	dev = &pdev->dev;
	net_dev = dev_get_drvdata(dev);

	priv = netdev_priv(net_dev);

	dpaa_eth_sysfs_remove(dev);

	dev_set_drvdata(dev, NULL);
	unregister_netdev(net_dev);

	err = dpaa_fq_free(dev, &priv->dpaa_fq_list);

	qman_delete_cgr_safe(&priv->ingress_cgr);
	qman_release_cgrid(priv->ingress_cgr.cgrid);
	qman_delete_cgr_safe(&priv->cgr_data.cgr);
	qman_release_cgrid(priv->cgr_data.cgr.cgrid);

	dpaa_napi_del(net_dev);

	dpaa_bps_free(priv);

	free_netdev(net_dev);

	return err;
}

static struct platform_device_id dpaa_devtype[] = {
	{
		.name = "dpaa-ethernet",
		.driver_data = 0,
	}, {
	}
};
MODULE_DEVICE_TABLE(platform, dpaa_devtype);

static struct platform_driver dpaa_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = dpaa_devtype,
	.probe = dpaa_eth_probe,
	.remove = dpaa_remove
};

static int __init dpaa_load(void)
{
	int err;

	pr_debug("FSL DPAA Ethernet driver\n");

	/* initialize dpaa_eth mirror values */
	dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
	dpaa_max_frm = fman_get_max_frm();

	err = platform_driver_register(&dpaa_driver);
	if (err < 0)
		pr_err("Error, platform_driver_register() = %d\n", err);

	return err;
}
module_init(dpaa_load);

static void __exit dpaa_unload(void)
{
	platform_driver_unregister(&dpaa_driver);

	/* Only one channel is used and needs to be released after all
	 * interfaces are removed
	 */
	dpaa_release_channel();
}
module_exit(dpaa_unload);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL DPAA Ethernet driver");