// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/net/ethernet/freescale/gianfar_ethtool.c
 *
 * Gianfar Ethernet Driver
 * Ethtool support for Gianfar Enet
 * Based on e1000 ethtool support
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <asm/types.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>
#include <linux/of_platform.h>
#include <linux/fsl/ptp_qoriq.h>

#include "gianfar.h"

#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff

static const char stat_gstrings[][ETH_GSTRING_LEN] = {
        /* extra stats */
        "rx-allocation-errors",
        "rx-large-frame-errors",
        "rx-short-frame-errors",
        "rx-non-octet-errors",
        "rx-crc-errors",
        "rx-overrun-errors",
        "rx-busy-errors",
        "rx-babbling-errors",
        "rx-truncated-frames",
        "ethernet-bus-error",
        "tx-babbling-errors",
        "tx-underrun-errors",
        "tx-timeout-errors",
        /* rmon stats */
        "tx-rx-64-frames",
        "tx-rx-65-127-frames",
        "tx-rx-128-255-frames",
        "tx-rx-256-511-frames",
        "tx-rx-512-1023-frames",
        "tx-rx-1024-1518-frames",
        "tx-rx-1519-1522-good-vlan",
        "rx-bytes",
        "rx-packets",
        "rx-fcs-errors",
        "receive-multicast-packet",
        "receive-broadcast-packet",
        "rx-control-frame-packets",
        "rx-pause-frame-packets",
        "rx-unknown-op-code",
        "rx-alignment-error",
        "rx-frame-length-error",
        "rx-code-error",
        "rx-carrier-sense-error",
        "rx-undersize-packets",
        "rx-oversize-packets",
        "rx-fragmented-frames",
        "rx-jabber-frames",
        "rx-dropped-frames",
        "tx-byte-counter",
        "tx-packets",
        "tx-multicast-packets",
        "tx-broadcast-packets",
        "tx-pause-control-frames",
        "tx-deferral-packets",
        "tx-excessive-deferral-packets",
        "tx-single-collision-packets",
        "tx-multiple-collision-packets",
        "tx-late-collision-packets",
        "tx-excessive-collision-packets",
        "tx-total-collision",
        "reserved",
        "tx-dropped-frames",
        "tx-jabber-frames",
        "tx-fcs-errors",
        "tx-control-frames",
        "tx-oversize-frames",
        "tx-undersize-frames",
        "tx-fragmented-frames",
};

/* Fill in a buffer with the strings which correspond to the
 * stats
 */
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
{
        struct gfar_private *priv = netdev_priv(dev);

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
                memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
        else
                memcpy(buf, stat_gstrings,
                       GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}

/* Fill in an array of 64-bit statistics from various sources.
 * This array will be appended to the end of the ethtool_stats
 * structure, and returned to user space.
 */
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
                            u64 *buf)
{
        int i;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        atomic64_t *extra = (atomic64_t *)&priv->extra_stats;

        for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
                buf[i] = atomic64_read(&extra[i]);

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                u32 __iomem *rmon = (u32 __iomem *)&regs->rmon;

                for (; i < GFAR_STATS_LEN; i++, rmon++)
                        buf[i] = (u64)gfar_read(rmon);
        }
}

static int gfar_sset_count(struct net_device *dev, int sset)
{
        struct gfar_private *priv = netdev_priv(dev);

        switch (sset) {
        case ETH_SS_STATS:
                if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
                        return GFAR_STATS_LEN;
                else
                        return GFAR_EXTRA_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
}

/* Fills in the drvinfo structure with some basic info */
static void gfar_gdrvinfo(struct net_device *dev,
                          struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, gfar_driver_version,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
}

/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
        return sizeof(struct gfar);
}

/* Return a dump of the GFAR register space */
static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                          void *regbuf)
{
        int i;
        struct gfar_private *priv = netdev_priv(dev);
        u32 __iomem *theregs = (u32 __iomem *)priv->gfargrp[0].regs;
        u32 *buf = (u32 *)regbuf;

        for (i = 0; i < sizeof(struct gfar) / sizeof(u32); i++)
                buf[i] = gfar_read(&theregs[i]);
}

/* Convert microseconds to ethernet clock ticks, which changes
 * depending on what speed the controller is running at.
 */
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
                                     unsigned int usecs)
{
        struct net_device *ndev = priv->ndev;
        struct phy_device *phydev = ndev->phydev;
        unsigned int count;

        /* The timer is different, depending on the interface speed */
        switch (phydev->speed) {
        case SPEED_1000:
                count = GFAR_GBIT_TIME;
                break;
        case SPEED_100:
                count = GFAR_100_TIME;
                break;
        case SPEED_10:
        default:
                count = GFAR_10_TIME;
                break;
        }

        /* Make sure we return a number greater than 0
         * if usecs > 0
         */
        return DIV_ROUND_UP(usecs * 1000, count);
}

/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
                                     unsigned int ticks)
{
        struct net_device *ndev = priv->ndev;
        struct phy_device *phydev = ndev->phydev;
        unsigned int count;

        /* The timer is different, depending on the interface speed */
        switch (phydev->speed) {
        case SPEED_1000:
                count = GFAR_GBIT_TIME;
                break;
        case SPEED_100:
                count = GFAR_100_TIME;
                break;
        case SPEED_10:
        default:
                count = GFAR_10_TIME;
                break;
        }

        /* Make sure we return a number greater than 0
         * if ticks is > 0
         */
        return (ticks * count) / 1000;
}
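
/* Worked example (illustrative only): assuming GFAR_GBIT_TIME from
 * gianfar.h is 512, i.e. one tick lasts 512 ns at gigabit speed, a
 * request for 100 usecs of coalescing becomes
 * DIV_ROUND_UP(100 * 1000, 512) = 196 ticks, and gfar_ticks2usecs()
 * maps those ticks back to (196 * 512) / 1000 = 100 usecs, so the
 * round trip only loses sub-tick precision.
 */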

/* Get the coalescing parameters, and put them in the cvals
 * structure.
 */
static int gfar_gcoalesce(struct net_device *dev,
                          struct ethtool_coalesce *cvals)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct gfar_priv_tx_q *tx_queue = NULL;
        unsigned long rxtime;
        unsigned long rxcount;
        unsigned long txtime;
        unsigned long txcount;

        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
                return -EOPNOTSUPP;

        if (!dev->phydev)
                return -ENODEV;

        rx_queue = priv->rx_queue[0];
        tx_queue = priv->tx_queue[0];

        rxtime = get_ictt_value(rx_queue->rxic);
        rxcount = get_icft_value(rx_queue->rxic);
        txtime = get_ictt_value(tx_queue->txic);
        txcount = get_icft_value(tx_queue->txic);
        cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
        cvals->rx_max_coalesced_frames = rxcount;

        cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
        cvals->tx_max_coalesced_frames = txcount;

        cvals->use_adaptive_rx_coalesce = 0;
        cvals->use_adaptive_tx_coalesce = 0;

        cvals->pkt_rate_low = 0;
        cvals->rx_coalesce_usecs_low = 0;
        cvals->rx_max_coalesced_frames_low = 0;
        cvals->tx_coalesce_usecs_low = 0;
        cvals->tx_max_coalesced_frames_low = 0;

        /* When the packet rate is below pkt_rate_high but above
         * pkt_rate_low (both measured in packets per second) the
         * normal {rx,tx}_* coalescing parameters are used.
         */

        /* When the packet rate (measured in packets per second) is
         * above pkt_rate_high, the {rx,tx}_*_high parameters are used.
         */
        cvals->pkt_rate_high = 0;
        cvals->rx_coalesce_usecs_high = 0;
        cvals->rx_max_coalesced_frames_high = 0;
        cvals->tx_coalesce_usecs_high = 0;
        cvals->tx_max_coalesced_frames_high = 0;

        /* How often to do adaptive coalescing packet rate sampling,
         * measured in seconds. Must not be zero.
         */
        cvals->rate_sample_interval = 0;

        return 0;
}

/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active.
 */
static int gfar_scoalesce(struct net_device *dev,
                          struct ethtool_coalesce *cvals)
{
        struct gfar_private *priv = netdev_priv(dev);
        int i, err = 0;

        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
                return -EOPNOTSUPP;

        if (!dev->phydev)
                return -ENODEV;

        /* Check the bounds of the values */
        if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
                netdev_info(dev, "Coalescing is limited to %d microseconds\n",
                            GFAR_MAX_COAL_USECS);
                return -EINVAL;
        }

        if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
                netdev_info(dev, "Coalescing is limited to %d frames\n",
                            GFAR_MAX_COAL_FRAMES);
                return -EINVAL;
        }

        /* Check the bounds of the values */
        if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
                netdev_info(dev, "Coalescing is limited to %d microseconds\n",
                            GFAR_MAX_COAL_USECS);
                return -EINVAL;
        }

        if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
                netdev_info(dev, "Coalescing is limited to %d frames\n",
                            GFAR_MAX_COAL_FRAMES);
                return -EINVAL;
        }

        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
                cpu_relax();

        /* Set up rx coalescing */
        if ((cvals->rx_coalesce_usecs == 0) ||
            (cvals->rx_max_coalesced_frames == 0)) {
                for (i = 0; i < priv->num_rx_queues; i++)
                        priv->rx_queue[i]->rxcoalescing = 0;
        } else {
                for (i = 0; i < priv->num_rx_queues; i++)
                        priv->rx_queue[i]->rxcoalescing = 1;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i]->rxic = mk_ic_value(
                        cvals->rx_max_coalesced_frames,
                        gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
        }

        /* Set up tx coalescing */
        if ((cvals->tx_coalesce_usecs == 0) ||
            (cvals->tx_max_coalesced_frames == 0)) {
                for (i = 0; i < priv->num_tx_queues; i++)
                        priv->tx_queue[i]->txcoalescing = 0;
        } else {
                for (i = 0; i < priv->num_tx_queues; i++)
                        priv->tx_queue[i]->txcoalescing = 1;
        }

        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i]->txic = mk_ic_value(
                        cvals->tx_max_coalesced_frames,
                        gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
        }

        if (dev->flags & IFF_UP) {
                stop_gfar(dev);
                err = startup_gfar(dev);
        } else {
                gfar_mac_reset(priv);
        }

        clear_bit_unlock(GFAR_RESETTING, &priv->state);

        return err;
}
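
/* Usage example (illustrative, with the standard ethtool utility and a
 * placeholder interface name):
 *
 *   ethtool -C eth0 rx-usecs 100 rx-frames 16 tx-usecs 100 tx-frames 16
 *
 * Setting either the usecs or the frames value of a direction to 0
 * disables coalescing for that direction, per the rule above.
 */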

/* Fills in rvals with the current ring parameters. Currently,
 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 * jumbo are ignored by the driver.
 */
static void gfar_gringparam(struct net_device *dev,
                            struct ethtool_ringparam *rvals)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;

        tx_queue = priv->tx_queue[0];
        rx_queue = priv->rx_queue[0];

        rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
        rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
        rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
        rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;

        /* Values changeable by the user. The valid values are
         * in the range 1 to the "*_max_pending" counterpart above.
         */
        rvals->rx_pending = rx_queue->rx_ring_size;
        rvals->rx_mini_pending = rx_queue->rx_ring_size;
        rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
        rvals->tx_pending = tx_queue->tx_ring_size;
}

/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in motion.
 */
static int gfar_sringparam(struct net_device *dev,
                           struct ethtool_ringparam *rvals)
{
        struct gfar_private *priv = netdev_priv(dev);
        int err = 0, i;

        if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
                return -EINVAL;

        if (!is_power_of_2(rvals->rx_pending)) {
                netdev_err(dev, "Ring sizes must be a power of 2\n");
                return -EINVAL;
        }

        if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
                return -EINVAL;

        if (!is_power_of_2(rvals->tx_pending)) {
                netdev_err(dev, "Ring sizes must be a power of 2\n");
                return -EINVAL;
        }

        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
                cpu_relax();

        if (dev->flags & IFF_UP)
                stop_gfar(dev);

        /* Change the sizes */
        for (i = 0; i < priv->num_rx_queues; i++)
                priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;

        for (i = 0; i < priv->num_tx_queues; i++)
                priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;

        /* Rebuild the rings with the new size */
        if (dev->flags & IFF_UP)
                err = startup_gfar(dev);

        clear_bit_unlock(GFAR_RESETTING, &priv->state);

        return err;
}
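
/* Usage example (illustrative): ring sizes must be powers of two within
 * the reported maxima, e.g.
 *
 *   ethtool -G eth0 rx 512 tx 512
 *
 * where eth0 is a placeholder interface name.
 */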

static void gfar_gpauseparam(struct net_device *dev,
                             struct ethtool_pauseparam *epause)
{
        struct gfar_private *priv = netdev_priv(dev);

        epause->autoneg = !!priv->pause_aneg_en;
        epause->rx_pause = !!priv->rx_pause_en;
        epause->tx_pause = !!priv->tx_pause_en;
}

static int gfar_spauseparam(struct net_device *dev,
                            struct ethtool_pauseparam *epause)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        struct gfar __iomem *regs = priv->gfargrp[0].regs;

        if (!phydev)
                return -ENODEV;

        if (!phy_validate_pause(phydev, epause))
                return -EINVAL;

        priv->rx_pause_en = priv->tx_pause_en = 0;
        phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
        if (epause->rx_pause) {
                priv->rx_pause_en = 1;

                if (epause->tx_pause)
                        priv->tx_pause_en = 1;
        } else if (epause->tx_pause) {
                priv->tx_pause_en = 1;
        }

        if (epause->autoneg)
                priv->pause_aneg_en = 1;
        else
                priv->pause_aneg_en = 0;

        if (!epause->autoneg) {
                u32 tempval = gfar_read(&regs->maccfg1);

                tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);

                priv->tx_actual_en = 0;
                if (priv->tx_pause_en) {
                        priv->tx_actual_en = 1;
                        tempval |= MACCFG1_TX_FLOW;
                }

                if (priv->rx_pause_en)
                        tempval |= MACCFG1_RX_FLOW;
                gfar_write(&regs->maccfg1, tempval);
        }

        return 0;
}

int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
        netdev_features_t changed = dev->features ^ features;
        struct gfar_private *priv = netdev_priv(dev);
        int err = 0;

        if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
                         NETIF_F_RXCSUM)))
                return 0;

        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
                cpu_relax();

        dev->features = features;

        if (dev->flags & IFF_UP) {
                /* Now we take down the rings to rebuild them */
                stop_gfar(dev);
                err = startup_gfar(dev);
        } else {
                gfar_mac_reset(priv);
        }

        clear_bit_unlock(GFAR_RESETTING, &priv->state);

        return err;
}

static uint32_t gfar_get_msglevel(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        return priv->msg_enable;
}

static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
        struct gfar_private *priv = netdev_priv(dev);

        priv->msg_enable = data;
}

#ifdef CONFIG_PM
static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct gfar_private *priv = netdev_priv(dev);

        wol->supported = 0;
        wol->wolopts = 0;

        if (priv->wol_supported & GFAR_WOL_MAGIC)
                wol->supported |= WAKE_MAGIC;

        if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
                wol->supported |= WAKE_UCAST;

        if (priv->wol_opts & GFAR_WOL_MAGIC)
                wol->wolopts |= WAKE_MAGIC;

        if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
                wol->wolopts |= WAKE_UCAST;
}

static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct gfar_private *priv = netdev_priv(dev);
        u16 wol_opts = 0;
        int err;

        if (!priv->wol_supported && wol->wolopts)
                return -EINVAL;

        if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
                return -EINVAL;

        if (wol->wolopts & WAKE_MAGIC) {
                wol_opts |= GFAR_WOL_MAGIC;
        } else {
                if (wol->wolopts & WAKE_UCAST)
                        wol_opts |= GFAR_WOL_FILER_UCAST;
        }

        wol_opts &= priv->wol_supported;
        priv->wol_opts = 0;

        err = device_set_wakeup_enable(priv->dev, wol_opts);
        if (err)
                return err;

        priv->wol_opts = wol_opts;

        return 0;
}
#endif
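
/* Usage example (illustrative): if the device advertised WAKE_MAGIC
 * above, magic-packet wake-up can be requested from user space with
 *
 *   ethtool -s eth0 wol g
 *
 * (or "wol u" for unicast wake-up via the filer, where supported);
 * eth0 is a placeholder interface name.
 */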

static void ethflow_to_filer_rules(struct gfar_private *priv, u64 ethflow)
{
        u32 fcr = 0x0, fpr = FPR_FILER_MASK;

        if (ethflow & RXH_L2DA) {
                fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
                      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;

                fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
                      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_VLAN) {
                fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                      RQFCR_AND | RQFCR_HASHTBL_0;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_IP_SRC) {
                fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                      RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_IP_DST) {
                fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                      RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_L3_PROTO) {
                fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                      RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_L4_B_0_1) {
                fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                      RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_L4_B_2_3) {
                fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                      RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }
}
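
/* Usage example (illustrative): the hash rules above are driven by
 * ethtool's receive flow hashing interface, e.g.
 *
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn
 *
 * which requests hashing on source/destination IP and both ports for
 * TCP over IPv4.
 */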

static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
                                       u64 class)
{
        unsigned int cmp_rqfpr;
        unsigned int *local_rqfpr;
        unsigned int *local_rqfcr;
        int i = 0x0, k = 0x0;
        int j = MAX_FILER_IDX, l = 0x0;
        int ret = 1;

        local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
                                    GFP_KERNEL);
        local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
                                    GFP_KERNEL);
        if (!local_rqfpr || !local_rqfcr) {
                ret = 0;
                goto err;
        }

        switch (class) {
        case TCP_V4_FLOW:
                cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
                break;
        case UDP_V4_FLOW:
                cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
                break;
        case TCP_V6_FLOW:
                cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
                break;
        case UDP_V6_FLOW:
                cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
                break;
        default:
                netdev_err(priv->ndev,
                           "Right now this class is not supported\n");
                ret = 0;
                goto err;
        }

        for (i = 0; i < MAX_FILER_IDX + 1; i++) {
                local_rqfpr[j] = priv->ftp_rqfpr[i];
                local_rqfcr[j] = priv->ftp_rqfcr[i];
                j--;
                if ((priv->ftp_rqfcr[i] ==
                     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
                    (priv->ftp_rqfpr[i] == cmp_rqfpr))
                        break;
        }

        if (i == MAX_FILER_IDX + 1) {
                netdev_err(priv->ndev,
                           "No parse rule found, can't create hash rules\n");
                ret = 0;
                goto err;
        }

        /* If a match was found, it marks the start of a cluster rule;
         * if one was already programmed, we need to overwrite these rules.
         */
        for (l = i + 1; l < MAX_FILER_IDX; l++) {
                if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
                    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
                        priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
                                             RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
                        priv->ftp_rqfpr[l] = FPR_FILER_MASK;
                        gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
                                         priv->ftp_rqfpr[l]);
                        break;
                }

                if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
                    (priv->ftp_rqfcr[l] & RQFCR_AND))
                        continue;

                local_rqfpr[j] = priv->ftp_rqfpr[l];
                local_rqfcr[j] = priv->ftp_rqfcr[l];
                j--;
        }

        priv->cur_filer_idx = l - 1;

        /* hash rules */
        ethflow_to_filer_rules(priv, ethflow);

        /* Write back the popped out rules again */
        for (k = j + 1; k < MAX_FILER_IDX; k++) {
                priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
                priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
                gfar_write_filer(priv, priv->cur_filer_idx,
                                 local_rqfcr[k], local_rqfpr[k]);
                if (!priv->cur_filer_idx)
                        break;
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

err:
        kfree(local_rqfcr);
        kfree(local_rqfpr);
        return ret;
}

static int gfar_set_hash_opts(struct gfar_private *priv,
                              struct ethtool_rxnfc *cmd)
{
        /* write the filer rules here */
        if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
                return -EINVAL;

        return 0;
}

static int gfar_check_filer_hardware(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 i;

        /* Check if we are in FIFO mode */
        i = gfar_read(&regs->ecntrl);
        i &= ECNTRL_FIFM;
        if (i == ECNTRL_FIFM) {
                netdev_notice(priv->ndev, "Interface in FIFO mode\n");
                i = gfar_read(&regs->rctrl);
                i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
                if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
                        netdev_info(priv->ndev,
                                    "Receive Queue Filtering enabled\n");
                } else {
                        netdev_warn(priv->ndev,
                                    "Receive Queue Filtering disabled\n");
                        return -EOPNOTSUPP;
                }
        } else {
                /* Or in standard mode */
                i = gfar_read(&regs->rctrl);
                i &= RCTRL_PRSDEP_MASK;
                if (i == RCTRL_PRSDEP_MASK) {
                        netdev_info(priv->ndev,
                                    "Receive Queue Filtering enabled\n");
                } else {
                        netdev_warn(priv->ndev,
                                    "Receive Queue Filtering disabled\n");
                        return -EOPNOTSUPP;
                }
        }

        /* Set the properties of the arbitrary filer rule
         * to the first 4 Layer-4 bytes
         */
        gfar_write(&regs->rbifx, 0xC0C1C2C3);
        return 0;
}

/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
        tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
        tab->fe[tab->index].prop = mask;
        tab->index++;
}

/* Sets parse bits (e.g. IP or TCP) */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
        gfar_set_mask(mask, tab);
        tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
                                   RQFCR_AND;
        tab->fe[tab->index].prop = value;
        tab->index++;
}

static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
                                       struct filer_table *tab)
{
        gfar_set_mask(mask, tab);
        tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
        tab->fe[tab->index].prop = value;
        tab->index++;
}
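
/* Note: as the helpers above show, every matched property costs two filer
 * entries, an RQFCR_PID_MASK entry loading the mask, ANDed with a compare
 * entry carrying the value. This is one reason the rule cache can fill up
 * well before MAX_FILER_IDX user-visible rules have been installed.
 */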

/* For setting a tuple of value and mask of type flag
 * Example:
 * IP-Src = 10.0.0.0/255.0.0.0
 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 *
 * Ethtool gives us value=0 and mask=~0 for a don't-care tuple;
 * for a don't-care mask it gives us a 0.
 *
 * The don't-care check and the mask adjustment for mask=0 are done for VLAN
 * and MAC stuff on an upper level (due to missing information on this level).
 * For those we can discard the tuple if it is value=0 and mask=0.
 *
 * Furthermore, all masks are one-padded for better hardware efficiency.
 */
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
                               struct filer_table *tab)
{
        switch (flag) {
        /* 3bit */
        case RQFCR_PID_PRI:
                if (!(value | mask))
                        return;
                mask |= RQFCR_PID_PRI_MASK;
                break;
        /* 8bit */
        case RQFCR_PID_L4P:
        case RQFCR_PID_TOS:
                if (!~(mask | RQFCR_PID_L4P_MASK))
                        return;
                if (!mask)
                        mask = ~0;
                else
                        mask |= RQFCR_PID_L4P_MASK;
                break;
        /* 12bit */
        case RQFCR_PID_VID:
                if (!(value | mask))
                        return;
                mask |= RQFCR_PID_VID_MASK;
                break;
        /* 16bit */
        case RQFCR_PID_DPT:
        case RQFCR_PID_SPT:
        case RQFCR_PID_ETY:
                if (!~(mask | RQFCR_PID_PORT_MASK))
                        return;
                if (!mask)
                        mask = ~0;
                else
                        mask |= RQFCR_PID_PORT_MASK;
                break;
        /* 24bit */
        case RQFCR_PID_DAH:
        case RQFCR_PID_DAL:
        case RQFCR_PID_SAH:
        case RQFCR_PID_SAL:
                if (!(value | mask))
                        return;
                mask |= RQFCR_PID_MAC_MASK;
                break;
        /* for all real 32bit masks */
        default:
                if (!~mask)
                        return;
                if (!mask)
                        mask = ~0;
                break;
        }
        gfar_set_general_attribute(value, mask, flag, tab);
}
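
/* Worked example (illustrative): for the 12-bit VID field, a user mask of
 * 0x0FF0 is one-padded by ORing in RQFCR_PID_VID_MASK (assumed here to set
 * every bit above the 12-bit field), so the hardware never compares the
 * undefined upper bits, while a value=0/mask=0 tuple is dropped entirely
 * because it constrains nothing.
 */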

/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
                              struct ethtool_tcpip4_spec *mask,
                              struct filer_table *tab)
{
        gfar_set_attribute(be32_to_cpu(value->ip4src),
                           be32_to_cpu(mask->ip4src),
                           RQFCR_PID_SIA, tab);
        gfar_set_attribute(be32_to_cpu(value->ip4dst),
                           be32_to_cpu(mask->ip4dst),
                           RQFCR_PID_DIA, tab);
        gfar_set_attribute(be16_to_cpu(value->pdst),
                           be16_to_cpu(mask->pdst),
                           RQFCR_PID_DPT, tab);
        gfar_set_attribute(be16_to_cpu(value->psrc),
                           be16_to_cpu(mask->psrc),
                           RQFCR_PID_SPT, tab);
        gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}

/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
                             struct ethtool_usrip4_spec *mask,
                             struct filer_table *tab)
{
        gfar_set_attribute(be32_to_cpu(value->ip4src),
                           be32_to_cpu(mask->ip4src),
                           RQFCR_PID_SIA, tab);
        gfar_set_attribute(be32_to_cpu(value->ip4dst),
                           be32_to_cpu(mask->ip4dst),
                           RQFCR_PID_DIA, tab);
        gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
        gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
        gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
                           be32_to_cpu(mask->l4_4_bytes),
                           RQFCR_PID_ARB, tab);
}

/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
                           struct filer_table *tab)
{
        u32 upper_temp_mask = 0;
        u32 lower_temp_mask = 0;

        /* Source address */
        if (!is_broadcast_ether_addr(mask->h_source)) {
                if (is_zero_ether_addr(mask->h_source)) {
                        upper_temp_mask = 0xFFFFFFFF;
                        lower_temp_mask = 0xFFFFFFFF;
                } else {
                        upper_temp_mask = mask->h_source[0] << 16 |
                                          mask->h_source[1] << 8 |
                                          mask->h_source[2];
                        lower_temp_mask = mask->h_source[3] << 16 |
                                          mask->h_source[4] << 8 |
                                          mask->h_source[5];
                }
                /* Upper 24bit */
                gfar_set_attribute(value->h_source[0] << 16 |
                                   value->h_source[1] << 8 |
                                   value->h_source[2],
                                   upper_temp_mask, RQFCR_PID_SAH, tab);
                /* And the same for the lower part */
                gfar_set_attribute(value->h_source[3] << 16 |
                                   value->h_source[4] << 8 |
                                   value->h_source[5],
                                   lower_temp_mask, RQFCR_PID_SAL, tab);
        }
        /* Destination address */
        if (!is_broadcast_ether_addr(mask->h_dest)) {
                /* Special for destination is limited broadcast */
                if ((is_broadcast_ether_addr(value->h_dest) &&
                     is_zero_ether_addr(mask->h_dest))) {
                        gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
                } else {
                        if (is_zero_ether_addr(mask->h_dest)) {
                                upper_temp_mask = 0xFFFFFFFF;
                                lower_temp_mask = 0xFFFFFFFF;
                        } else {
                                upper_temp_mask = mask->h_dest[0] << 16 |
                                                  mask->h_dest[1] << 8 |
                                                  mask->h_dest[2];
                                lower_temp_mask = mask->h_dest[3] << 16 |
                                                  mask->h_dest[4] << 8 |
                                                  mask->h_dest[5];
                        }

                        /* Upper 24bit */
                        gfar_set_attribute(value->h_dest[0] << 16 |
                                           value->h_dest[1] << 8 |
                                           value->h_dest[2],
                                           upper_temp_mask, RQFCR_PID_DAH, tab);
                        /* And the same for the lower part */
                        gfar_set_attribute(value->h_dest[3] << 16 |
                                           value->h_dest[4] << 8 |
                                           value->h_dest[5],
                                           lower_temp_mask, RQFCR_PID_DAL, tab);
                }
        }

        gfar_set_attribute(be16_to_cpu(value->h_proto),
                           be16_to_cpu(mask->h_proto),
                           RQFCR_PID_ETY, tab);
}

static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
{
        return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
{
        return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
{
        return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
{
        return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
{
        return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
                VLAN_PRIO_SHIFT;
}

static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
{
        return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
                VLAN_PRIO_SHIFT;
}
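
/* For reference, the 16-bit VLAN TCI decomposes per IEEE 802.1Q as:
 * bits 15-13 PCP (priority), bit 12 CFI/DEI, bits 11-0 VID. For example,
 * a TCI of 0xA00A carries priority 5, CFI 0 and VLAN ID 10, which is what
 * the six accessors above would return for it.
 */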

/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
                                 struct filer_table *tab)
{
        u32 vlan = 0, vlan_mask = 0;
        u32 id = 0, id_mask = 0;
        u32 cfi = 0, cfi_mask = 0;
        u32 prio = 0, prio_mask = 0;
        u32 old_index = tab->index;

        /* Check if vlan is wanted */
        if ((rule->flow_type & FLOW_EXT) &&
            (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
                if (!rule->m_ext.vlan_tci)
                        rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);

                vlan = RQFPR_VLN;
                vlan_mask = RQFPR_VLN;

                /* Separate the fields */
                id = vlan_tci_vid(rule);
                id_mask = vlan_tci_vidm(rule);
                cfi = vlan_tci_cfi(rule);
                cfi_mask = vlan_tci_cfim(rule);
                prio = vlan_tci_prio(rule);
                prio_mask = vlan_tci_priom(rule);

                if (cfi_mask) {
                        if (cfi)
                                vlan |= RQFPR_CFI;
                        vlan_mask |= RQFPR_CFI;
                }
        }

        switch (rule->flow_type & ~FLOW_EXT) {
        case TCP_V4_FLOW:
                gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
                                    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
                gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
                                  &rule->m_u.tcp_ip4_spec, tab);
                break;
        case UDP_V4_FLOW:
                gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
                                    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
                gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
                                  &rule->m_u.udp_ip4_spec, tab);
                break;
        case SCTP_V4_FLOW:
                gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
                                    tab);
                /* 132 is the SCTP protocol number in the IP header */
                gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
                gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
                                  (struct ethtool_tcpip4_spec *)&rule->m_u,
                                  tab);
                break;
        case IP_USER_FLOW:
                gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
                                    tab);
                gfar_set_user_ip((struct ethtool_usrip4_spec *)&rule->h_u,
                                 (struct ethtool_usrip4_spec *)&rule->m_u,
                                 tab);
                break;
        case ETHER_FLOW:
                if (vlan)
                        gfar_set_parse_bits(vlan, vlan_mask, tab);
                gfar_set_ether((struct ethhdr *)&rule->h_u,
                               (struct ethhdr *)&rule->m_u, tab);
                break;
        default:
                return -1;
        }

        /* Set the vlan attributes in the end */
        if (vlan) {
                gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
                gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
        }

        /* If there has been nothing written till now, it must be a default */
        if (tab->index == old_index) {
                gfar_set_mask(0xFFFFFFFF, tab);
                tab->fe[tab->index].ctrl = 0x20;
                tab->fe[tab->index].prop = 0x0;
                tab->index++;
        }

        /* Remove last AND */
        tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

        /* Specify which queue to use or to drop */
        if (rule->ring_cookie == RX_CLS_FLOW_DISC)
                tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
        else
                tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

        /* Only big enough entries can be clustered */
        if (tab->index > (old_index + 2)) {
                tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
                tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
        }

        /* In rare cases the cache can be full while there is
         * free space in hw
         */
        if (tab->index > MAX_FILER_CACHE_IDX - 1)
                return -EBUSY;

        return 0;
}

/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
                                  struct filer_table *tab)
{
        u32 i = 0;

        if (tab->index > MAX_FILER_IDX - 1)
                return -EBUSY;

        /* Fill regular entries */
        for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
                gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
        /* Fill the rest with fall-throughs */
        for (; i < MAX_FILER_IDX; i++)
                gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
        /* Last entry must be default accept
         * because that's what people expect
         */
        gfar_write_filer(priv, i, 0x20, 0x0);

        return 0;
}

static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
                                 struct gfar_private *priv)
{
        if (flow->flow_type & FLOW_EXT) {
                if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
                        netdev_warn(priv->ndev,
                                    "User-specific data not supported!\n");
                if (~flow->m_ext.vlan_etype)
                        netdev_warn(priv->ndev,
                                    "VLAN-etype not supported!\n");
        }

        if (flow->flow_type == IP_USER_FLOW)
                if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
                        netdev_warn(priv->ndev,
                                    "IP-Version differing from IPv4 not supported!\n");

        return 0;
}

static int gfar_process_filer_changes(struct gfar_private *priv)
{
        struct ethtool_flow_spec_container *j;
        struct filer_table *tab;
        s32 ret = 0;

        /* So index is set to zero, too! */
        tab = kzalloc(sizeof(*tab), GFP_KERNEL);
        if (tab == NULL)
                return -ENOMEM;

        /* Now convert the existing filer data from flow_spec into
         * filer tables binary format
         */
        list_for_each_entry(j, &priv->rx_list.list, list) {
                ret = gfar_convert_to_filer(&j->fs, tab);
                if (ret == -EBUSY) {
                        netdev_err(priv->ndev,
                                   "Rule not added: No free space!\n");
                        goto end;
                }
                if (ret == -1) {
                        netdev_err(priv->ndev,
                                   "Rule not added: Unsupported Flow-type!\n");
                        goto end;
                }
        }

        /* Write everything to hardware */
        ret = gfar_write_filer_table(priv, tab);
        if (ret == -EBUSY) {
                netdev_err(priv->ndev, "Rule not added: No free space!\n");
                goto end;
        }

end:
        kfree(tab);
        return ret;
}

static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
{
        u32 i = 0;

        for (i = 0; i < sizeof(flow->m_u); i++)
                flow->m_u.hdata[i] ^= 0xFF;

        flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
        flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
        flow->m_ext.data[0] ^= cpu_to_be32(~0);
        flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

static int gfar_add_cls(struct gfar_private *priv,
                        struct ethtool_rx_flow_spec *flow)
{
        struct ethtool_flow_spec_container *temp, *comp;
        int ret = 0;

        temp = kmalloc(sizeof(*temp), GFP_KERNEL);
        if (temp == NULL)
                return -ENOMEM;
        memcpy(&temp->fs, flow, sizeof(temp->fs));

        gfar_invert_masks(&temp->fs);
        ret = gfar_check_capability(&temp->fs, priv);
        if (ret)
                goto clean_mem;

        /* Link in the new element at the right @location */
        if (list_empty(&priv->rx_list.list)) {
                ret = gfar_check_filer_hardware(priv);
                if (ret != 0)
                        goto clean_mem;
                list_add(&temp->list, &priv->rx_list.list);
                goto process;
        } else {
                list_for_each_entry(comp, &priv->rx_list.list, list) {
                        if (comp->fs.location > flow->location) {
                                list_add_tail(&temp->list, &comp->list);
                                goto process;
                        }
                        if (comp->fs.location == flow->location) {
                                netdev_err(priv->ndev,
                                           "Rule not added: ID %d not free!\n",
                                           flow->location);
                                ret = -EBUSY;
                                goto clean_mem;
                        }
                }
                list_add_tail(&temp->list, &priv->rx_list.list);
        }

process:
        priv->rx_list.count++;
        ret = gfar_process_filer_changes(priv);
        if (ret)
                goto clean_list;
        return ret;

clean_list:
        priv->rx_list.count--;
        list_del(&temp->list);
clean_mem:
        kfree(temp);
        return ret;
}

static int gfar_del_cls(struct gfar_private *priv, u32 loc)
{
        struct ethtool_flow_spec_container *comp;
        int ret = -EINVAL;

        if (list_empty(&priv->rx_list.list))
                return ret;

        list_for_each_entry(comp, &priv->rx_list.list, list) {
                if (comp->fs.location == loc) {
                        list_del(&comp->list);
                        kfree(comp);
                        priv->rx_list.count--;
                        gfar_process_filer_changes(priv);
                        ret = 0;
                        break;
                }
        }

        return ret;
}
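
/* Usage example (illustrative): a classification rule steering TCP/IPv4
 * traffic with destination port 80 to rx queue 1 could be installed with
 *
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 1 loc 10
 *
 * and removed again with "ethtool -N eth0 delete 10".
 */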

static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
        struct ethtool_flow_spec_container *comp;
        int ret = -EINVAL;

        list_for_each_entry(comp, &priv->rx_list.list, list) {
                if (comp->fs.location == cmd->fs.location) {
                        memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
                        gfar_invert_masks(&cmd->fs);
                        ret = 0;
                        break;
                }
        }

        return ret;
}

static int gfar_get_cls_all(struct gfar_private *priv,
                            struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
        struct ethtool_flow_spec_container *comp;
        u32 i = 0;

        list_for_each_entry(comp, &priv->rx_list.list, list) {
                if (i == cmd->rule_cnt)
                        return -EMSGSIZE;
                rule_locs[i] = comp->fs.location;
                i++;
        }

        cmd->data = MAX_FILER_IDX;
        cmd->rule_cnt = i;

        return 0;
}

static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
        struct gfar_private *priv = netdev_priv(dev);
        int ret = 0;

        if (test_bit(GFAR_RESETTING, &priv->state))
                return -EBUSY;

        mutex_lock(&priv->rx_queue_access);

        switch (cmd->cmd) {
        case ETHTOOL_SRXFH:
                ret = gfar_set_hash_opts(priv, cmd);
                break;
        case ETHTOOL_SRXCLSRLINS:
                if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
                     cmd->fs.ring_cookie >= priv->num_rx_queues) ||
                    cmd->fs.location >= MAX_FILER_IDX) {
                        ret = -EINVAL;
                        break;
                }
                ret = gfar_add_cls(priv, &cmd->fs);
                break;
        case ETHTOOL_SRXCLSRLDEL:
                ret = gfar_del_cls(priv, cmd->fs.location);
                break;
        default:
                ret = -EINVAL;
        }

        mutex_unlock(&priv->rx_queue_access);

        return ret;
}

static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
                        u32 *rule_locs)
{
        struct gfar_private *priv = netdev_priv(dev);
        int ret = 0;

        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
                cmd->data = priv->num_rx_queues;
                break;
        case ETHTOOL_GRXCLSRLCNT:
                cmd->rule_cnt = priv->rx_list.count;
                break;
        case ETHTOOL_GRXCLSRULE:
                ret = gfar_get_cls(priv, cmd);
                break;
        case ETHTOOL_GRXCLSRLALL:
                ret = gfar_get_cls_all(priv, cmd, rule_locs);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int gfar_get_ts_info(struct net_device *dev,
                            struct ethtool_ts_info *info)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct platform_device *ptp_dev;
        struct device_node *ptp_node;
        struct ptp_qoriq *ptp = NULL;

        info->phc_index = -1;

        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
                info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
                                        SOF_TIMESTAMPING_SOFTWARE;
                return 0;
        }

        ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
        if (ptp_node) {
                ptp_dev = of_find_device_by_node(ptp_node);
                if (ptp_dev)
                        ptp = platform_get_drvdata(ptp_dev);
        }

        if (ptp)
                info->phc_index = ptp->phc_index;

        info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
                                SOF_TIMESTAMPING_RX_HARDWARE |
                                SOF_TIMESTAMPING_RAW_HARDWARE;
        info->tx_types = (1 << HWTSTAMP_TX_OFF) |
                         (1 << HWTSTAMP_TX_ON);
        info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
                           (1 << HWTSTAMP_FILTER_ALL);

        return 0;
}
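
/* Usage note (illustrative): user space queries these capabilities with
 * "ethtool -T eth0"; the reported PTP hardware clock index ties the
 * interface to the corresponding /dev/ptpN device exposed by the
 * fsl,etsec-ptp node looked up above.
 */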

const struct ethtool_ops gfar_ethtool_ops = {
        .get_drvinfo = gfar_gdrvinfo,
        .get_regs_len = gfar_reglen,
        .get_regs = gfar_get_regs,
        .get_link = ethtool_op_get_link,
        .get_coalesce = gfar_gcoalesce,
        .set_coalesce = gfar_scoalesce,
        .get_ringparam = gfar_gringparam,
        .set_ringparam = gfar_sringparam,
        .get_pauseparam = gfar_gpauseparam,
        .set_pauseparam = gfar_spauseparam,
        .get_strings = gfar_gstrings,
        .get_sset_count = gfar_sset_count,
        .get_ethtool_stats = gfar_fill_stats,
        .get_msglevel = gfar_get_msglevel,
        .set_msglevel = gfar_set_msglevel,
#ifdef CONFIG_PM
        .get_wol = gfar_get_wol,
        .set_wol = gfar_set_wol,
#endif
        .set_rxnfc = gfar_set_nfc,
        .get_rxnfc = gfar_get_nfc,
        .get_ts_info = gfar_get_ts_info,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
};