/* Copyright 2008-2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/string.h>
#include <linux/of_platform.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/ptp_qoriq.h>

#include "dpaa_eth.h"
#include "mac.h"

static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
	"interrupts",
	"rx packets",
	"tx packets",
	"tx confirm",
	"tx S/G",
	"tx error",
	"rx error",
};

static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
	/* dpa rx errors */
	"rx dma error",
	"rx frame physical error",
	"rx frame size error",
	"rx header error",

	/* demultiplexing errors */
	"qman cg_tdrop",
	"qman wred",
	"qman error cond",
	"qman early window",
	"qman late window",
	"qman fq tdrop",
	"qman fq retired",
	"qman orp disabled",

	/* congestion related stats */
	"congestion time (ms)",
	"entered congestion",
	"congested (0/1)"
};

#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)

static int dpaa_get_link_ksettings(struct net_device *net_dev,
				   struct ethtool_link_ksettings *cmd)
{
	if (!net_dev->phydev) {
		netdev_dbg(net_dev, "phy device not initialized\n");
		return 0;
	}

	phy_ethtool_ksettings_get(net_dev->phydev, cmd);

	return 0;
}

static int dpaa_set_link_ksettings(struct net_device *net_dev,
				   const struct ethtool_link_ksettings *cmd)
{
	int err;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
	if (err < 0)
		netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);

	return err;
}

static void dpaa_get_drvinfo(struct net_device *net_dev,
			     struct ethtool_drvinfo *drvinfo)
{
	int len;

	strlcpy(drvinfo->driver, KBUILD_MODNAME,
		sizeof(drvinfo->driver));
	len = snprintf(drvinfo->version, sizeof(drvinfo->version),
		       "%X", 0);
	len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		       "%X", 0);

	if (len >= sizeof(drvinfo->fw_version)) {
		/* Truncated output */
		netdev_notice(net_dev, "snprintf() = %d\n", len);
	}
	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static u32 dpaa_get_msglevel(struct net_device *net_dev)
{
	return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
}

static void dpaa_set_msglevel(struct net_device *net_dev,
			      u32 msg_enable)
{
	((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
}

static int dpaa_nway_reset(struct net_device *net_dev)
{
	int err;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	err = 0;
	if (net_dev->phydev->autoneg) {
		err = phy_start_aneg(net_dev->phydev);
		if (err < 0)
			netdev_err(net_dev, "phy_start_aneg() = %d\n",
				   err);
	}

	return err;
}
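/* Flow control note (a summary of the handlers below, not new behaviour):
 * the user's requested pause settings are cached in the mac_device as the
 * *_req fields, while the *_active fields reflect what was actually
 * resolved against the link partner's advertisement, so the state reported
 * back to ethtool may legitimately differ from what was requested.
 */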
static void dpaa_get_pauseparam(struct net_device *net_dev,
				struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return;
	}

	epause->autoneg = mac_dev->autoneg_pause;
	epause->rx_pause = mac_dev->rx_pause_active;
	epause->tx_pause = mac_dev->tx_pause_active;
}

static int dpaa_set_pauseparam(struct net_device *net_dev,
			       struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct phy_device *phydev;
	bool rx_pause, tx_pause;
	struct dpaa_priv *priv;
	int err;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	phydev = net_dev->phydev;
	if (!phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	if (!phy_validate_pause(phydev, epause))
		return -EINVAL;

	/* The MAC should know how to handle PAUSE frame autonegotiation before
	 * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
	 * settings.
	 */
	mac_dev->autoneg_pause = !!epause->autoneg;
	mac_dev->rx_pause_req = !!epause->rx_pause;
	mac_dev->tx_pause_req = !!epause->tx_pause;

	/* Determine the sym/asym advertised PAUSE capabilities from the
	 * desired rx/tx pause settings.
	 */
	phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);

	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
	if (err < 0)
		netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);

	return err;
}

static int dpaa_get_sset_count(struct net_device *net_dev, int type)
{
	unsigned int total_stats, num_stats;

	num_stats = num_online_cpus() + 1;
	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
		      DPAA_STATS_GLOBAL_LEN;

	switch (type) {
	case ETH_SS_STATS:
		return total_stats;
	default:
		return -EOPNOTSUPP;
	}
}
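/* Layout of the u64 stats array filled by copy_stats() below, for N online
 * CPUs (illustrative sketch derived from the indexing arithmetic):
 *
 *	row r, column c	 ->  data[r * (N + 1) + c]
 *	columns 0..N-1	 ->  the per-CPU values
 *	column N	 ->  the running total across all CPUs
 *
 * Rows 0..DPAA_STATS_PERCPU_LEN-1 hold the per-CPU counters, followed by
 * DPAA_BPS_NUM buffer pool rows; the global counters are appended flat
 * after the last row.
 */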
static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
		       int crr_cpu, u64 *bp_count, u64 *data)
{
	int num_values = num_cpus + 1;
	int crr = 0, j;

	/* update current CPU's stats and also add them to the total values */
	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
	data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;

	for (j = 0; j < DPAA_BPS_NUM; j++) {
		data[crr * num_values + crr_cpu] = bp_count[j];
		data[crr++ * num_values + num_cpus] += bp_count[j];
	}
}

static void dpaa_get_ethtool_stats(struct net_device *net_dev,
				   struct ethtool_stats *stats, u64 *data)
{
	u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
	struct dpaa_percpu_priv *percpu_priv;
	struct dpaa_rx_errors rx_errors;
	unsigned int num_cpus, offset;
	struct dpaa_ern_cnt ern_cnt;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	int total_stats, i, j;
	bool cg_status;

	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
	priv = netdev_priv(net_dev);
	num_cpus = num_online_cpus();

	memset(&bp_count, 0, sizeof(bp_count));
	memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
	memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
	memset(data, 0, total_stats * sizeof(u64));

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		for (j = 0; j < DPAA_BPS_NUM; j++) {
			dpaa_bp = priv->dpaa_bps[j];
			if (!dpaa_bp->percpu_count)
				continue;
			bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
		}
		rx_errors.dme += percpu_priv->rx_errors.dme;
		rx_errors.fpe += percpu_priv->rx_errors.fpe;
		rx_errors.fse += percpu_priv->rx_errors.fse;
		rx_errors.phe += percpu_priv->rx_errors.phe;

		ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
		ern_cnt.wred += percpu_priv->ern_cnt.wred;
		ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
		ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
		ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
		ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
		ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;

		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
	}

	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));

	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
	memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));

	/* gather congestion related counters */
	cg_num = 0;
	cg_status = false;
	cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
	if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
		cg_num = priv->cgr_data.cgr_congested_count;

		/* reset congestion stats (like the QMan API does) */
		priv->cgr_data.congested_jiffies = 0;
		priv->cgr_data.cgr_congested_count = 0;
	}

	offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
	data[offset++] = cg_time;
	data[offset++] = cg_num;
	data[offset++] = cg_status;
}

static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
			     u8 *data)
{
	unsigned int i, j, num_cpus, size;
	char string_cpu[ETH_GSTRING_LEN];
	u8 *strings;

	memset(string_cpu, 0, sizeof(string_cpu));
	strings = data;
	num_cpus = num_online_cpus();
	size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;

	for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
				 dpaa_stats_percpu[i], j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
			 dpaa_stats_percpu[i]);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	for (i = 0; i < DPAA_BPS_NUM; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN,
				 "bpool %c [CPU %d]", 'a' + i, j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
			 'a' + i);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	memcpy(strings, dpaa_stats_global, size);
}
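/* With two online CPUs, for instance, the stat names emitted above would
 * begin (illustrative "ethtool -S" output):
 *	interrupts [CPU 0]
 *	interrupts [CPU 1]
 *	interrupts [TOTAL]
 *	rx packets [CPU 0]
 *	...
 *	bpool a [CPU 0]
 *	...
 * followed by the flat dpaa_stats_global names.
 */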
static int dpaa_get_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *cmd)
{
	struct dpaa_priv *priv = netdev_priv(dev);

	cmd->data = 0;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* Fall through */
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		cmd->data = 0;
		break;
	}

	return 0;
}

static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *unused)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXFH:
		ret = dpaa_get_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
	struct mac_device *mac_dev;
	struct fman_port *rxport;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;
	rxport = mac_dev->port[0];

	fman_port_use_kg_hash(rxport, enable);
	priv->keygen_in_use = enable;
}

static int dpaa_set_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *nfc)
{
	int ret = -EINVAL;

	/* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		dpaa_set_hash(dev, !!nfc->data);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = dpaa_set_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int dpaa_get_ts_info(struct net_device *net_dev,
			    struct ethtool_ts_info *info)
{
	struct device *dev = net_dev->dev.parent;
	struct device_node *mac_node = dev->of_node;
	struct device_node *fman_node = NULL, *ptp_node = NULL;
	struct platform_device *ptp_dev = NULL;
	struct ptp_qoriq *ptp = NULL;

	info->phc_index = -1;

	fman_node = of_get_parent(mac_node);
	if (fman_node)
		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);

	if (ptp_node)
		ptp_dev = of_find_device_by_node(ptp_node);

	if (ptp_dev)
		ptp = platform_get_drvdata(ptp_dev);

	if (ptp)
		info->phc_index = ptp->phc_index;

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}
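/* Interrupt coalescing maps straight onto two QMan portal knobs: the DQRR
 * interrupt inhibit period (reported as rx_coalesce_usecs) and the DQRR
 * interrupt threshold (reported as rx_max_coalesced_frames). A typical
 * tuning command would look like (hypothetical interface name and values):
 *	ethtool -C eth0 rx-usecs 64 rx-frames 32
 */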
static int dpaa_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *c)
{
	struct qman_portal *portal;
	u32 period;
	u8 thresh;

	portal = qman_get_affine_portal(smp_processor_id());
	qman_portal_get_iperiod(portal, &period);
	qman_dqrr_get_ithresh(portal, &thresh);

	c->rx_coalesce_usecs = period;
	c->rx_max_coalesced_frames = thresh;
	c->use_adaptive_rx_coalesce = false;

	return 0;
}

static int dpaa_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *c)
{
	const cpumask_t *cpus = qman_affine_cpus();
	bool needs_revert[NR_CPUS] = {false};
	struct qman_portal *portal;
	u32 period, prev_period;
	u8 thresh, prev_thresh;
	int cpu, res;

	if (c->use_adaptive_rx_coalesce)
		return -EINVAL;

	period = c->rx_coalesce_usecs;
	thresh = c->rx_max_coalesced_frames;

	/* save previous values */
	portal = qman_get_affine_portal(smp_processor_id());
	qman_portal_get_iperiod(portal, &prev_period);
	qman_dqrr_get_ithresh(portal, &prev_thresh);

	/* set new values */
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		portal = qman_get_affine_portal(cpu);
		res = qman_portal_set_iperiod(portal, period);
		if (res)
			goto revert_values;
		res = qman_dqrr_set_ithresh(portal, thresh);
		if (res) {
			qman_portal_set_iperiod(portal, prev_period);
			goto revert_values;
		}
		needs_revert[cpu] = true;
	}

	return 0;

revert_values:
	/* restore previous values */
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		if (!needs_revert[cpu])
			continue;
		portal = qman_get_affine_portal(cpu);
		/* previous values will not fail, ignore return value */
		qman_portal_set_iperiod(portal, prev_period);
		qman_dqrr_set_ithresh(portal, prev_thresh);
	}

	return res;
}

const struct ethtool_ops dpaa_ethtool_ops = {
	.get_drvinfo = dpaa_get_drvinfo,
	.get_msglevel = dpaa_get_msglevel,
	.set_msglevel = dpaa_set_msglevel,
	.nway_reset = dpaa_nway_reset,
	.get_pauseparam = dpaa_get_pauseparam,
	.set_pauseparam = dpaa_set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_sset_count = dpaa_get_sset_count,
	.get_ethtool_stats = dpaa_get_ethtool_stats,
	.get_strings = dpaa_get_strings,
	.get_link_ksettings = dpaa_get_link_ksettings,
	.set_link_ksettings = dpaa_set_link_ksettings,
	.get_rxnfc = dpaa_get_rxnfc,
	.set_rxnfc = dpaa_set_rxnfc,
	.get_ts_info = dpaa_get_ts_info,
	.get_coalesce = dpaa_get_coalesce,
	.set_coalesce = dpaa_set_coalesce,
};
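/* The ops table above is attached to the net_device at probe time by the
 * main driver, along the lines of the following sketch (the actual
 * assignment lives in the dpaa_eth driver proper, not in this file):
 *
 *	net_dev->ethtool_ops = &dpaa_ethtool_ops;
 */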