/* Copyright 2008-2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/string.h>

#include "dpaa_eth.h"
#include "mac.h"

static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
	"interrupts",
	"rx packets",
	"tx packets",
	"tx confirm",
	"tx S/G",
	"tx error",
	"rx error",
};

static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
	/* dpa rx errors */
	"rx dma error",
	"rx frame physical error",
	"rx frame size error",
	"rx header error",

	/* demultiplexing errors */
	"qman cg_tdrop",
	"qman wred",
	"qman error cond",
	"qman early window",
	"qman late window",
	"qman fq tdrop",
	"qman fq retired",
	"qman orp disabled",

	/* congestion related stats */
	"congestion time (ms)",
	"entered congestion",
	"congested (0/1)"
};

#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)

static int dpaa_get_link_ksettings(struct net_device *net_dev,
				   struct ethtool_link_ksettings *cmd)
{
	if (!net_dev->phydev) {
		netdev_dbg(net_dev, "phy device not initialized\n");
		return 0;
	}

	phy_ethtool_ksettings_get(net_dev->phydev, cmd);

	return 0;
}

static int dpaa_set_link_ksettings(struct net_device *net_dev,
				   const struct ethtool_link_ksettings *cmd)
{
	int err;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
	if (err < 0)
		netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);

	return err;
}

static void dpaa_get_drvinfo(struct net_device *net_dev,
			     struct ethtool_drvinfo *drvinfo)
{
	int len;

	strlcpy(drvinfo->driver, KBUILD_MODNAME,
		sizeof(drvinfo->driver));
	/* the first snprintf() return value was previously assigned to len
	 * and then immediately overwritten; only the fw_version length was
	 * ever checked, so don't capture the dead value
	 */
	snprintf(drvinfo->version, sizeof(drvinfo->version), "%X", 0);
	len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		       "%X", 0);
	if (len >= sizeof(drvinfo->fw_version)) {
		/* Truncated output */
		netdev_notice(net_dev, "snprintf() = %d\n", len);
	}
	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static u32 dpaa_get_msglevel(struct net_device *net_dev)
{
	return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
}

static void dpaa_set_msglevel(struct net_device *net_dev,
			      u32 msg_enable)
{
	((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
}

static int dpaa_nway_reset(struct net_device *net_dev)
{
	int err;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	err = 0;
	if (net_dev->phydev->autoneg) {
		err = phy_start_aneg(net_dev->phydev);
		if (err < 0)
			netdev_err(net_dev, "phy_start_aneg() = %d\n",
				   err);
	}

	return err;
}

static void dpaa_get_pauseparam(struct net_device *net_dev,
				struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return;
	}

	epause->autoneg = mac_dev->autoneg_pause;
	epause->rx_pause = mac_dev->rx_pause_active;
	epause->tx_pause = mac_dev->tx_pause_active;
}
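/* Mapping of the requested (rx_pause, tx_pause) pair to the advertised
 * PAUSE bits, following the IEEE 802.3 Annex 28B convention used by
 * other kernel drivers:
 *   rx=0, tx=0  ->  none
 *   rx=0, tx=1  ->  Asym_Pause
 *   rx=1, tx=0  ->  Pause | Asym_Pause
 *   rx=1, tx=1  ->  Pause
 */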
static int dpaa_set_pauseparam(struct net_device *net_dev,
			       struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct phy_device *phydev;
	bool rx_pause, tx_pause;
	struct dpaa_priv *priv;
	u32 newadv, oldadv;
	int err;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	phydev = net_dev->phydev;
	if (!phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	if (!(phydev->supported & SUPPORTED_Pause) ||
	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
	     (epause->rx_pause != epause->tx_pause)))
		return -EINVAL;

	/* The MAC should know how to handle PAUSE frame autonegotiation before
	 * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
	 * settings.
	 */
	mac_dev->autoneg_pause = !!epause->autoneg;
	mac_dev->rx_pause_req = !!epause->rx_pause;
	mac_dev->tx_pause_req = !!epause->tx_pause;

	/* Determine the sym/asym advertised PAUSE capabilities from the
	 * desired rx/tx pause settings.
	 */
	newadv = 0;
	if (epause->rx_pause)
		newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (epause->tx_pause)
		newadv ^= ADVERTISED_Asym_Pause;

	oldadv = phydev->advertising &
			(ADVERTISED_Pause | ADVERTISED_Asym_Pause);

	/* If there are differences between the old and the new advertised
	 * values, restart PHY autonegotiation and advertise the new values.
	 */
	if (oldadv != newadv) {
		phydev->advertising &= ~(ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause);
		phydev->advertising |= newadv;
		if (phydev->autoneg) {
			err = phy_start_aneg(phydev);
			if (err < 0)
				netdev_err(net_dev, "phy_start_aneg() = %d\n",
					   err);
		}
	}

	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
	if (err < 0)
		netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);

	return err;
}

static int dpaa_get_sset_count(struct net_device *net_dev, int type)
{
	unsigned int total_stats, num_stats;

	num_stats = num_online_cpus() + 1;
	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
		      DPAA_STATS_GLOBAL_LEN;

	switch (type) {
	case ETH_SS_STATS:
		return total_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
		       int crr_cpu, u64 *bp_count, u64 *data)
{
	int num_values = num_cpus + 1;
	int crr = 0, j;

	/* update current CPU's stats and also add them to the total values */
	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
	data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;

	for (j = 0; j < DPAA_BPS_NUM; j++) {
		data[crr * num_values + crr_cpu] = bp_count[j];
		data[crr++ * num_values + num_cpus] += bp_count[j];
	}
}
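/* Layout of the u64 array filled in below, one row per counter written
 * by copy_stats() above: each per-CPU counter and each bpool count gets
 * one column per online CPU plus a final TOTAL column; the flat global
 * counters (rx errors, ERN counters, congestion state) follow.
 */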
static void dpaa_get_ethtool_stats(struct net_device *net_dev,
				   struct ethtool_stats *stats, u64 *data)
{
	u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
	struct dpaa_percpu_priv *percpu_priv;
	struct dpaa_rx_errors rx_errors;
	unsigned int num_cpus, offset;
	struct dpaa_ern_cnt ern_cnt;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	int total_stats, i, j;
	bool cg_status;

	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
	priv = netdev_priv(net_dev);
	num_cpus = num_online_cpus();

	memset(&bp_count, 0, sizeof(bp_count));
	memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
	memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
	memset(data, 0, total_stats * sizeof(u64));

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		for (j = 0; j < DPAA_BPS_NUM; j++) {
			dpaa_bp = priv->dpaa_bps[j];
			if (!dpaa_bp->percpu_count)
				continue;
			bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
		}
		rx_errors.dme += percpu_priv->rx_errors.dme;
		rx_errors.fpe += percpu_priv->rx_errors.fpe;
		rx_errors.fse += percpu_priv->rx_errors.fse;
		rx_errors.phe += percpu_priv->rx_errors.phe;

		ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
		ern_cnt.wred += percpu_priv->ern_cnt.wred;
		ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
		ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
		ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
		ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
		ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;

		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
	}

	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));

	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
	memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));

	/* gather congestion related counters */
	cg_num = 0;
	cg_status = false;
	cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
	if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
		cg_num = priv->cgr_data.cgr_congested_count;

		/* reset congestion stats (like the QMan API does) */
		priv->cgr_data.congested_jiffies = 0;
		priv->cgr_data.cgr_congested_count = 0;
	}

	offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
	data[offset++] = cg_time;
	data[offset++] = cg_num;
	data[offset++] = cg_status;
}
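/* The string table must mirror the value layout produced by
 * dpaa_get_ethtool_stats(): for every per-CPU counter and every bpool,
 * one entry per online CPU plus a [TOTAL] entry, followed by the
 * global statistics names.
 */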
static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
			     u8 *data)
{
	unsigned int i, j, num_cpus, size;
	char string_cpu[ETH_GSTRING_LEN];
	u8 *strings;

	memset(string_cpu, 0, sizeof(string_cpu));
	strings = data;
	num_cpus = num_online_cpus();
	size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;

	for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
				 dpaa_stats_percpu[i], j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
			 dpaa_stats_percpu[i]);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	for (i = 0; i < DPAA_BPS_NUM; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN,
				 "bpool %c [CPU %d]", 'a' + i, j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
			 'a' + i);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	memcpy(strings, dpaa_stats_global, size);
}

static int dpaa_get_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *cmd)
{
	struct dpaa_priv *priv = netdev_priv(dev);

	cmd->data = 0;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* Fall through */
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		cmd->data = 0;
		break;
	}

	return 0;
}

static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *unused)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXFH:
		ret = dpaa_get_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
	struct mac_device *mac_dev;
	struct fman_port *rxport;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;
	rxport = mac_dev->port[0];

	fman_port_use_kg_hash(rxport, enable);
	priv->keygen_in_use = enable;
}

static int dpaa_set_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *nfc)
{
	int ret = -EINVAL;

	/* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		dpaa_set_hash(dev, !!nfc->data);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = dpaa_set_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}
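/* User-space entry points for the ops below (the interface name is
 * hypothetical and depends on the system's naming scheme):
 *   ethtool -S <iface>                        -> get_strings/get_ethtool_stats
 *   ethtool -A <iface> autoneg on rx on tx on -> set_pauseparam
 *   ethtool -N <iface> rx-flow-hash tcp4 sdfn -> set_rxnfc (ETHTOOL_SRXFH)
 */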
const struct ethtool_ops dpaa_ethtool_ops = {
	.get_drvinfo = dpaa_get_drvinfo,
	.get_msglevel = dpaa_get_msglevel,
	.set_msglevel = dpaa_set_msglevel,
	.nway_reset = dpaa_nway_reset,
	.get_pauseparam = dpaa_get_pauseparam,
	.set_pauseparam = dpaa_set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_sset_count = dpaa_get_sset_count,
	.get_ethtool_stats = dpaa_get_ethtool_stats,
	.get_strings = dpaa_get_strings,
	.get_link_ksettings = dpaa_get_link_ksettings,
	.set_link_ksettings = dpaa_set_link_ksettings,
	.get_rxnfc = dpaa_get_rxnfc,
	.set_rxnfc = dpaa_set_rxnfc,
};