/* Copyright 2008-2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/string.h>
#include <linux/of_platform.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/ptp_qoriq.h>

#include "dpaa_eth.h"
#include "mac.h"

static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
	"interrupts",
	"rx packets",
	"tx packets",
	"tx confirm",
	"tx S/G",
	"tx error",
	"rx error",
	"rx dropped",
	"tx dropped",
};

static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
	/* dpa rx errors */
	"rx dma error",
	"rx frame physical error",
	"rx frame size error",
	"rx header error",

	/* demultiplexing errors */
	"qman cg_tdrop",
	"qman wred",
	"qman error cond",
	"qman early window",
	"qman late window",
	"qman fq tdrop",
	"qman fq retired",
	"qman orp disabled",

	/* congestion related stats */
	"congestion time (ms)",
	"entered congestion",
	"congested (0/1)"
};

#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)

static int dpaa_get_link_ksettings(struct net_device *net_dev,
				   struct ethtool_link_ksettings *cmd)
{
	if (!net_dev->phydev)
		return 0;

	phy_ethtool_ksettings_get(net_dev->phydev, cmd);

	return 0;
}

static int dpaa_set_link_ksettings(struct net_device *net_dev,
				   const struct ethtool_link_ksettings *cmd)
{
	int err;

	if (!net_dev->phydev)
		return -ENODEV;

	err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
	if (err < 0)
		netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);

	return err;
}

static void dpaa_get_drvinfo(struct net_device *net_dev,
			     struct ethtool_drvinfo *drvinfo)
{
	int len;

	strlcpy(drvinfo->driver, KBUILD_MODNAME,
		sizeof(drvinfo->driver));
	len = snprintf(drvinfo->version, sizeof(drvinfo->version),
		       "%X", 0);
	len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		       "%X", 0);

	if (len >= sizeof(drvinfo->fw_version)) {
		/* Truncated output */
		netdev_notice(net_dev, "snprintf() = %d\n", len);
	}
	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static u32 dpaa_get_msglevel(struct net_device *net_dev)
{
	return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
}

static void dpaa_set_msglevel(struct net_device *net_dev,
			      u32 msg_enable)
{
	((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
}

static int dpaa_nway_reset(struct net_device *net_dev)
{
	int err;

	if (!net_dev->phydev)
		return -ENODEV;

	err = 0;
	if (net_dev->phydev->autoneg) {
		err = phy_start_aneg(net_dev->phydev);
		if (err < 0)
			netdev_err(net_dev, "phy_start_aneg() = %d\n",
				   err);
	}

	return err;
}

static void dpaa_get_pauseparam(struct net_device *net_dev,
				struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	if (!net_dev->phydev)
		return;

	epause->autoneg = mac_dev->autoneg_pause;
	epause->rx_pause = mac_dev->rx_pause_active;
	epause->tx_pause = mac_dev->tx_pause_active;
}

static int dpaa_set_pauseparam(struct net_device *net_dev,
			       struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct phy_device *phydev;
	bool rx_pause, tx_pause;
	struct dpaa_priv *priv;
	int err;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	phydev = net_dev->phydev;
	if (!phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	if (!phy_validate_pause(phydev, epause))
		return -EINVAL;

	/* The MAC should know how to handle PAUSE frame autonegotiation before
	 * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
	 * settings.
	 */
	mac_dev->autoneg_pause = !!epause->autoneg;
	mac_dev->rx_pause_req = !!epause->rx_pause;
	mac_dev->tx_pause_req = !!epause->tx_pause;

	/* Determine the sym/asym advertised PAUSE capabilities from the desired
	 * rx/tx pause settings.
	 */

	phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);

	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
	if (err < 0)
		netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);

	return err;
}
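
/* Illustrative usage (a sketch, not part of the driver): the pause callbacks
 * above back the standard ethtool pause interface, e.g.
 *
 *   ethtool -A eth0 autoneg on rx on tx on   # invokes dpaa_set_pauseparam()
 *   ethtool -a eth0                          # invokes dpaa_get_pauseparam()
 *
 * dpaa_set_pauseparam() records the requested settings in mac_dev before
 * touching the PHY, so that the subsequent adjust_link callback can resolve
 * the negotiated sym/asym PAUSE advertisement into the rx/tx pause state
 * actually programmed into the MAC.
 */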
static int dpaa_get_sset_count(struct net_device *net_dev, int type)
{
	unsigned int total_stats, num_stats;

	num_stats = num_online_cpus() + 1;
	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
		      DPAA_STATS_GLOBAL_LEN;

	switch (type) {
	case ETH_SS_STATS:
		return total_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
		       int crr_cpu, u64 bp_count, u64 *data)
{
	int num_values = num_cpus + 1;
	int crr = 0;

	/* update current CPU's stats and also add them to the total values */
	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
	data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;

	data[crr * num_values + crr_cpu] = bp_count;
	data[crr++ * num_values + num_cpus] += bp_count;
}

static void dpaa_get_ethtool_stats(struct net_device *net_dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct dpaa_percpu_priv *percpu_priv;
	struct dpaa_rx_errors rx_errors;
	unsigned int num_cpus, offset;
	u64 bp_count, cg_time, cg_num;
	struct dpaa_ern_cnt ern_cnt;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	int total_stats, i;
	bool cg_status;

	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
	priv = netdev_priv(net_dev);
	num_cpus = num_online_cpus();

	memset(&bp_count, 0, sizeof(bp_count));
	memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
	memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
	memset(data, 0, total_stats * sizeof(u64));

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		dpaa_bp = priv->dpaa_bp;
		if (!dpaa_bp->percpu_count)
			continue;
		bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
		rx_errors.dme += percpu_priv->rx_errors.dme;
		rx_errors.fpe += percpu_priv->rx_errors.fpe;
		rx_errors.fse += percpu_priv->rx_errors.fse;
		rx_errors.phe += percpu_priv->rx_errors.phe;

		ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
		ern_cnt.wred += percpu_priv->ern_cnt.wred;
		ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
		ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
		ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
		ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
		ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;

		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
	}

	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));

	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
	memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));

	/* gather congestion related counters */
	cg_num = 0;
	cg_status = false;
	cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
	if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
		cg_num = priv->cgr_data.cgr_congested_count;

		/* reset congestion stats (like QMan API does) */
		priv->cgr_data.congested_jiffies = 0;
		priv->cgr_data.cgr_congested_count = 0;
	}

	offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
	data[offset++] = cg_time;
	data[offset++] = cg_num;
	data[offset++] = cg_status;
}
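
/* Worked example of the "ethtool -S" buffer layout built above (illustrative,
 * assuming two online CPUs, so num_values = 3):
 *
 *   data[0..2]   interrupts  [CPU 0], [CPU 1], [TOTAL]
 *   data[3..5]   rx packets  [CPU 0], [CPU 1], [TOTAL]
 *   ...          one row of num_values columns per dpaa_stats_percpu entry,
 *                then one more row for the buffer pool counts
 *   data[30..]   the dpaa_rx_errors block, the dpaa_ern_cnt block, and
 *                finally the three congestion values
 *
 * With two CPUs, dpaa_get_sset_count() returns 3 * (9 + 1) + 15 = 45 u64
 * values, matching the string table laid out in dpaa_get_strings() below.
 */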
static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
			     u8 *data)
{
	unsigned int i, j, num_cpus, size;
	char string_cpu[ETH_GSTRING_LEN];
	u8 *strings;

	memset(string_cpu, 0, sizeof(string_cpu));
	strings = data;
	num_cpus = num_online_cpus();
	size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;

	for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
				 dpaa_stats_percpu[i], j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
			 dpaa_stats_percpu[i]);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	for (j = 0; j < num_cpus; j++) {
		snprintf(string_cpu, ETH_GSTRING_LEN,
			 "bpool [CPU %d]", j);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
	memcpy(strings, string_cpu, ETH_GSTRING_LEN);
	strings += ETH_GSTRING_LEN;

	memcpy(strings, dpaa_stats_global, size);
}

static int dpaa_get_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *cmd)
{
	struct dpaa_priv *priv = netdev_priv(dev);

	cmd->data = 0;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* Fall through */
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		cmd->data = 0;
		break;
	}

	return 0;
}

static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *unused)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXFH:
		ret = dpaa_get_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}
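
/* Illustrative usage (a sketch, not part of the driver): the RXNFC callbacks
 * back the ethtool flow-hash interface, e.g.
 *
 *   ethtool -n eth0 rx-flow-hash tcp4        # dpaa_get_rxnfc()/ETHTOOL_GRXFH
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn   # dpaa_set_rxnfc()/ETHTOOL_SRXFH
 *
 * where "s", "d", "f" and "n" select RXH_IP_SRC, RXH_IP_DST, RXH_L4_B_0_1 and
 * RXH_L4_B_2_3 respectively, the only fields dpaa_set_hash_opts() accepts.
 * The hardware applies a single keygen scheme on the rx port, so
 * dpaa_set_hash() below toggles hashing globally rather than per flow type.
 */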
static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
	struct mac_device *mac_dev;
	struct fman_port *rxport;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;
	rxport = mac_dev->port[0];

	fman_port_use_kg_hash(rxport, enable);
	priv->keygen_in_use = enable;
}

static int dpaa_set_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *nfc)
{
	int ret = -EINVAL;

	/* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		dpaa_set_hash(dev, !!nfc->data);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = dpaa_set_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int dpaa_get_ts_info(struct net_device *net_dev,
			    struct ethtool_ts_info *info)
{
	struct device *dev = net_dev->dev.parent;
	struct device_node *mac_node = dev->of_node;
	struct device_node *fman_node = NULL, *ptp_node = NULL;
	struct platform_device *ptp_dev = NULL;
	struct ptp_qoriq *ptp = NULL;

	info->phc_index = -1;

	fman_node = of_get_parent(mac_node);
	if (fman_node)
		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);

	if (ptp_node)
		ptp_dev = of_find_device_by_node(ptp_node);

	if (ptp_dev)
		ptp = platform_get_drvdata(ptp_dev);

	if (ptp)
		info->phc_index = ptp->phc_index;

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}

static int dpaa_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *c)
{
	struct qman_portal *portal;
	u32 period;
	u8 thresh;

	portal = qman_get_affine_portal(smp_processor_id());
	qman_portal_get_iperiod(portal, &period);
	qman_dqrr_get_ithresh(portal, &thresh);

	c->rx_coalesce_usecs = period;
	c->rx_max_coalesced_frames = thresh;
	c->use_adaptive_rx_coalesce = false;

	return 0;
}
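
/* Illustrative usage (a sketch, not part of the driver): interrupt coalescing
 * maps the QMan portal interrupt period and DQRR threshold onto the standard
 * ethtool knobs:
 *
 *   ethtool -C eth0 rx-usecs 64 rx-frames 32   # dpaa_set_coalesce()
 *   ethtool -c eth0                            # dpaa_get_coalesce()
 *
 * dpaa_set_coalesce() below applies the new values to every affine portal and
 * rolls back the portals already updated if any portal rejects them.
 */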
static int dpaa_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *c)
{
	const cpumask_t *cpus = qman_affine_cpus();
	bool needs_revert[NR_CPUS] = {false};
	struct qman_portal *portal;
	u32 period, prev_period;
	u8 thresh, prev_thresh;
	int cpu, res;

	if (c->use_adaptive_rx_coalesce)
		return -EINVAL;

	period = c->rx_coalesce_usecs;
	thresh = c->rx_max_coalesced_frames;

	/* save previous values */
	portal = qman_get_affine_portal(smp_processor_id());
	qman_portal_get_iperiod(portal, &prev_period);
	qman_dqrr_get_ithresh(portal, &prev_thresh);

	/* set new values */
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		portal = qman_get_affine_portal(cpu);
		res = qman_portal_set_iperiod(portal, period);
		if (res)
			goto revert_values;
		res = qman_dqrr_set_ithresh(portal, thresh);
		if (res) {
			qman_portal_set_iperiod(portal, prev_period);
			goto revert_values;
		}
		needs_revert[cpu] = true;
	}

	return 0;

revert_values:
	/* restore previous values */
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		if (!needs_revert[cpu])
			continue;
		portal = qman_get_affine_portal(cpu);
		/* previous values will not fail, ignore return value */
		qman_portal_set_iperiod(portal, prev_period);
		qman_dqrr_set_ithresh(portal, prev_thresh);
	}

	return res;
}

const struct ethtool_ops dpaa_ethtool_ops = {
	.get_drvinfo = dpaa_get_drvinfo,
	.get_msglevel = dpaa_get_msglevel,
	.set_msglevel = dpaa_set_msglevel,
	.nway_reset = dpaa_nway_reset,
	.get_pauseparam = dpaa_get_pauseparam,
	.set_pauseparam = dpaa_set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_sset_count = dpaa_get_sset_count,
	.get_ethtool_stats = dpaa_get_ethtool_stats,
	.get_strings = dpaa_get_strings,
	.get_link_ksettings = dpaa_get_link_ksettings,
	.set_link_ksettings = dpaa_set_link_ksettings,
	.get_rxnfc = dpaa_get_rxnfc,
	.set_rxnfc = dpaa_set_rxnfc,
	.get_ts_info = dpaa_get_ts_info,
	.get_coalesce = dpaa_get_coalesce,
	.set_coalesce = dpaa_set_coalesce,
};
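
/* Usage note (a sketch, not part of this file): the net device attaches these
 * ops during setup in dpaa_eth.c, roughly:
 *
 *   net_dev->ethtool_ops = &dpaa_ethtool_ops;
 *
 * after which the ethtool core dispatches the ETHTOOL_* requests to the
 * callbacks above.
 */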