/*
 * drivers/net/ethernet/freescale/gianfar_ethtool.c
 *
 * Gianfar Ethernet Driver
 * Ethtool support for Gianfar Enet
 * Based on e1000 ethtool support
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 *
 * This software may be used and distributed according to
 * the terms of the GNU Public License, Version 2, incorporated herein
 * by reference.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <asm/types.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>

#include "gianfar.h"

extern void gfar_start(struct net_device *dev);
extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
			      int rx_work_limit);

#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf);
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals);
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals);
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo);

static const char stat_gstrings[][ETH_GSTRING_LEN] = {
	"rx-large-frame-errors",
	"rx-short-frame-errors",
	"rx-non-octet-errors",
	"rx-crc-errors",
	"rx-overrun-errors",
	"rx-busy-errors",
	"rx-babbling-errors",
	"rx-truncated-frames",
	"ethernet-bus-error",
	"tx-babbling-errors",
	"tx-underrun-errors",
	"rx-skb-missing-errors",
	"tx-timeout-errors",
	"tx-rx-64-frames",
	"tx-rx-65-127-frames",
	"tx-rx-128-255-frames",
	"tx-rx-256-511-frames",
	"tx-rx-512-1023-frames",
	"tx-rx-1024-1518-frames",
	"tx-rx-1519-1522-good-vlan",
	"rx-bytes",
	"rx-packets",
	"rx-fcs-errors",
	"receive-multicast-packet",
	"receive-broadcast-packet",
	"rx-control-frame-packets",
	"rx-pause-frame-packets",
	"rx-unknown-op-code",
	"rx-alignment-error",
	"rx-frame-length-error",
	"rx-code-error",
	"rx-carrier-sense-error",
	"rx-undersize-packets",
	"rx-oversize-packets",
	"rx-fragmented-frames",
	"rx-jabber-frames",
	"rx-dropped-frames",
	"tx-byte-counter",
	"tx-packets",
	"tx-multicast-packets",
	"tx-broadcast-packets",
	"tx-pause-control-frames",
	"tx-deferral-packets",
	"tx-excessive-deferral-packets",
	"tx-single-collision-packets",
	"tx-multiple-collision-packets",
	"tx-late-collision-packets",
	"tx-excessive-collision-packets",
	"tx-total-collision",
	"reserved",
	"tx-dropped-frames",
	"tx-jabber-frames",
"tx-fcs-errors", 122 "tx-control-frames", 123 "tx-oversize-frames", 124 "tx-undersize-frames", 125 "tx-fragmented-frames", 126 }; 127 128 /* Fill in a buffer with the strings which correspond to the 129 * stats */ 130 static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf) 131 { 132 struct gfar_private *priv = netdev_priv(dev); 133 134 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) 135 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN); 136 else 137 memcpy(buf, stat_gstrings, 138 GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN); 139 } 140 141 /* Fill in an array of 64-bit statistics from various sources. 142 * This array will be appended to the end of the ethtool_stats 143 * structure, and returned to user space 144 */ 145 static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, 146 u64 *buf) 147 { 148 int i; 149 struct gfar_private *priv = netdev_priv(dev); 150 struct gfar __iomem *regs = priv->gfargrp[0].regs; 151 atomic64_t *extra = (atomic64_t *)&priv->extra_stats; 152 153 for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) 154 buf[i] = atomic64_read(&extra[i]); 155 156 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 157 u32 __iomem *rmon = (u32 __iomem *) ®s->rmon; 158 159 for (; i < GFAR_STATS_LEN; i++, rmon++) 160 buf[i] = (u64) gfar_read(rmon); 161 } 162 } 163 164 static int gfar_sset_count(struct net_device *dev, int sset) 165 { 166 struct gfar_private *priv = netdev_priv(dev); 167 168 switch (sset) { 169 case ETH_SS_STATS: 170 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) 171 return GFAR_STATS_LEN; 172 else 173 return GFAR_EXTRA_STATS_LEN; 174 default: 175 return -EOPNOTSUPP; 176 } 177 } 178 179 /* Fills in the drvinfo structure with some basic info */ 180 static void gfar_gdrvinfo(struct net_device *dev, 181 struct ethtool_drvinfo *drvinfo) 182 { 183 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); 184 strlcpy(drvinfo->version, gfar_driver_version, 185 sizeof(drvinfo->version)); 186 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); 187 strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info)); 188 drvinfo->regdump_len = 0; 189 drvinfo->eedump_len = 0; 190 } 191 192 193 static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd) 194 { 195 struct gfar_private *priv = netdev_priv(dev); 196 struct phy_device *phydev = priv->phydev; 197 198 if (NULL == phydev) 199 return -ENODEV; 200 201 return phy_ethtool_sset(phydev, cmd); 202 } 203 204 205 /* Return the current settings in the ethtool_cmd structure */ 206 static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd) 207 { 208 struct gfar_private *priv = netdev_priv(dev); 209 struct phy_device *phydev = priv->phydev; 210 struct gfar_priv_rx_q *rx_queue = NULL; 211 struct gfar_priv_tx_q *tx_queue = NULL; 212 213 if (NULL == phydev) 214 return -ENODEV; 215 tx_queue = priv->tx_queue[0]; 216 rx_queue = priv->rx_queue[0]; 217 218 /* etsec-1.7 and older versions have only one txic 219 * and rxic regs although they support multiple queues */ 220 cmd->maxtxpkt = get_icft_value(tx_queue->txic); 221 cmd->maxrxpkt = get_icft_value(rx_queue->rxic); 222 223 return phy_ethtool_gset(phydev, cmd); 224 } 225 226 /* Return the length of the register structure */ 227 static int gfar_reglen(struct net_device *dev) 228 { 229 return sizeof (struct gfar); 230 } 231 232 /* Return a dump of the GFAR register space */ 233 static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, 234 void *regbuf) 235 { 236 int i; 237 struct 
	struct gfar_private *priv = netdev_priv(dev);
	u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
	u32 *buf = (u32 *) regbuf;

	for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
		buf[i] = gfar_read(&theregs[i]);
}

/* Convert microseconds to ethernet clock ticks, which changes
 * depending on what speed the controller is running at */
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
				     unsigned int usecs)
{
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (priv->phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if usecs > 0 */
	return (usecs * 1000 + count - 1) / count;
}

/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
				     unsigned int ticks)
{
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (priv->phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0 */
	/* if ticks is > 0 */
	return (ticks * count) / 1000;
}

/* Get the coalescing parameters, and put them in the cvals
 * structure. */
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;
	unsigned long rxtime;
	unsigned long rxcount;
	unsigned long txtime;
	unsigned long txcount;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (NULL == priv->phydev)
		return -ENODEV;

	rx_queue = priv->rx_queue[0];
	tx_queue = priv->tx_queue[0];

	rxtime  = get_ictt_value(rx_queue->rxic);
	rxcount = get_icft_value(rx_queue->rxic);
	txtime  = get_ictt_value(tx_queue->txic);
	txcount = get_icft_value(tx_queue->txic);
	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
	cvals->rx_max_coalesced_frames = rxcount;

	cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
	cvals->tx_max_coalesced_frames = txcount;

	cvals->use_adaptive_rx_coalesce = 0;
	cvals->use_adaptive_tx_coalesce = 0;

	cvals->pkt_rate_low = 0;
	cvals->rx_coalesce_usecs_low = 0;
	cvals->rx_max_coalesced_frames_low = 0;
	cvals->tx_coalesce_usecs_low = 0;
	cvals->tx_max_coalesced_frames_low = 0;

	/* When the packet rate is below pkt_rate_high but above
	 * pkt_rate_low (both measured in packets per second) the
	 * normal {rx,tx}_* coalescing parameters are used.
	 */

	/* When the packet rate (measured in packets per second)
	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
	 * used.
	 */
	cvals->pkt_rate_high = 0;
	cvals->rx_coalesce_usecs_high = 0;
	cvals->rx_max_coalesced_frames_high = 0;
	cvals->tx_coalesce_usecs_high = 0;
	cvals->tx_max_coalesced_frames_high = 0;

	/* How often to do adaptive coalescing packet rate sampling,
	 * measured in seconds. Must not be zero.
	 */
	cvals->rate_sample_interval = 0;

	return 0;
}

/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active
 */
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	/* Set up rx coalescing */
	/* As of now, we will enable/disable coalescing for all
	 * queues together in case of eTSEC2, this will be modified
	 * along with the ethtool interface
	 */
	if ((cvals->rx_coalesce_usecs == 0) ||
	    (cvals->rx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 1;
	}

	if (NULL == priv->phydev)
		return -ENODEV;

	/* Check the bounds of the values */
	if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rxic = mk_ic_value(
			cvals->rx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
	}

	/* Set up tx coalescing */
	if ((cvals->tx_coalesce_usecs == 0) ||
	    (cvals->tx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 1;
	}

	/* Check the bounds of the values */
	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->txic = mk_ic_value(
			cvals->tx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
	}

	gfar_configure_coalescing_all(priv);

	return 0;
}
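
/* Usage note (illustrative, not part of the original driver): with the
 * limits checked above, a request such as
 *	ethtool -C ethX rx-usecs 30 rx-frames 16
 * enables rx coalescing on all queues, while passing 0 for either value
 * disables it, as handled in gfar_scoalesce() above.
 */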
/* Fills in rvals with the current ring parameters. Currently,
 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 * jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;

	/* Values changeable by the user. The valid values are
	 * in the range 1 to the "*_max_pending" counterpart above.
	 */
	rvals->rx_pending = rx_queue->rx_ring_size;
	rvals->rx_mini_pending = rx_queue->rx_ring_size;
	rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
	rvals->tx_pending = tx_queue->tx_ring_size;
}

/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in
 * motion. We wait for the ring to be clean before reallocating
 * the rings.
 */
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i = 0;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}


	if (dev->flags & IFF_UP) {
		unsigned long flags;

		/* Halt TX and RX, and process the frames which
		 * have already been received
		 */
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt(dev);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		for (i = 0; i < priv->num_rx_queues; i++)
			gfar_clean_rx_ring(priv->rx_queue[i],
					   priv->rx_queue[i]->rx_ring_size);

		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
	}

	/* Change the size */
	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
		priv->tx_queue[i]->num_txbdfree =
			priv->tx_queue[i]->tx_ring_size;
	}

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP) {
		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}

int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int err = 0, i = 0;
	netdev_features_t changed = dev->features ^ features;

	if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
		gfar_vlan_mode(dev, features);

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	if (dev->flags & IFF_UP) {
		/* Halt TX and RX, and process the frames which
		 * have already been received
		 */
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt(dev);

		unlock_tx_qs(priv);
		unlock_rx_qs(priv);
		local_irq_restore(flags);

		for (i = 0; i < priv->num_rx_queues; i++)
			gfar_clean_rx_ring(priv->rx_queue[i],
					   priv->rx_queue[i]->rx_ring_size);

		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);

		dev->features = features;

		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}

static uint32_t gfar_get_msglevel(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->msg_enable = data;
}
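
/* Usage note (illustrative, not part of the original driver): msg_enable
 * holds the standard NETIF_MSG_* bitmask, so e.g.
 *	ethtool -s ethX msglvl 0x3
 * enables NETIF_MSG_DRV | NETIF_MSG_PROBE messages from this driver.
 */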
#ifdef CONFIG_PM
static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}

static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    wol->wolopts != 0)
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);

	spin_lock_irqsave(&priv->bflock, flags);
	priv->wol_en = !!device_may_wakeup(&dev->dev);
	spin_unlock_irqrestore(&priv->bflock, flags);

	return 0;
}
#endif

static void ethflow_to_filer_rules(struct gfar_private *priv, u64 ethflow)
{
	u32 fcr = 0x0, fpr = FPR_FILER_MASK;

	if (ethflow & RXH_L2DA) {
		fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;

		fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_VLAN) {
		fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_IP_SRC) {
		fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & (RXH_IP_DST)) {
		fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L3_PROTO) {
		fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_0_1) {
		fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_2_3) {
		fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}
}

static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
				       u64 class)
{
	unsigned int last_rule_idx = priv->cur_filer_idx;
	unsigned int cmp_rqfpr;
	unsigned int *local_rqfpr;
	unsigned int *local_rqfcr;
	int i = 0x0, k = 0x0;
	int j = MAX_FILER_IDX, l = 0x0;
	int ret = 1;

	local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	if (!local_rqfpr || !local_rqfcr) {
		ret = 0;
		goto err;
	}

	switch (class) {
	case TCP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
		break;
	case UDP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
		break;
	case TCP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
		break;
	case UDP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
		break;
	default:
		netdev_err(priv->ndev,
			   "Right now this class is not supported\n");
		ret = 0;
		goto err;
	}

	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
		local_rqfpr[j] = priv->ftp_rqfpr[i];
		local_rqfcr[j] = priv->ftp_rqfcr[i];
		j--;
		if ((priv->ftp_rqfcr[i] ==
		     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
		    (priv->ftp_rqfpr[i] == cmp_rqfpr))
			break;
	}

	if (i == MAX_FILER_IDX + 1) {
		netdev_err(priv->ndev,
			   "No parse rule found, can't create hash rules\n");
		ret = 0;
		goto err;
	}

	/* If a match was found, then it marks the start of a cluster rule;
	 * if it was already programmed, we need to overwrite these rules
	 */
	for (l = i+1; l < MAX_FILER_IDX; l++) {
		if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
			priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
					     RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
			priv->ftp_rqfpr[l] = FPR_FILER_MASK;
			gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
					 priv->ftp_rqfpr[l]);
			break;
		}

		if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    (priv->ftp_rqfcr[l] & RQFCR_AND))
			continue;
		else {
			local_rqfpr[j] = priv->ftp_rqfpr[l];
			local_rqfcr[j] = priv->ftp_rqfcr[l];
			j--;
		}
	}

	priv->cur_filer_idx = l - 1;
	last_rule_idx = l;

	/* hash rules */
	ethflow_to_filer_rules(priv, ethflow);

	/* Write back the popped out rules again */
	for (k = j+1; k < MAX_FILER_IDX; k++) {
		priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
		priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
		gfar_write_filer(priv, priv->cur_filer_idx,
				 local_rqfcr[k], local_rqfpr[k]);
		if (!priv->cur_filer_idx)
			break;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

err:
	kfree(local_rqfcr);
	kfree(local_rqfpr);
	return ret;
}

static int gfar_set_hash_opts(struct gfar_private *priv,
			      struct ethtool_rxnfc *cmd)
{
	/* write the filer rules here */
	if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
		return -EINVAL;

	return 0;
}
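
/* Usage note (illustrative, not part of the original driver): a hash-option
 * request such as
 *	ethtool -N ethX rx-flow-hash tcp4 sdfn
 * arrives here with RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3
 * set in cmd->data, which ethflow_to_filer_rules() turns into per-field
 * hash rules in the filer table.
 */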
static int gfar_check_filer_hardware(struct gfar_private *priv)
{
	struct gfar __iomem *regs = NULL;
	u32 i;

	regs = priv->gfargrp[0].regs;

	/* Check if we are in FIFO mode */
	i = gfar_read(&regs->ecntrl);
	i &= ECNTRL_FIFM;
	if (i == ECNTRL_FIFM) {
		netdev_notice(priv->ndev, "Interface in FIFO mode\n");
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
		if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}
	/* Or in standard mode */
	else {
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK;
		if (i == RCTRL_PRSDEP_MASK) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}

	/* Sets the properties for arbitrary filer rule
	 * to the first 4 Layer 4 Bytes
	 */
	regs->rbifx = 0xC0C1C2C3;
	return 0;
}

static int gfar_comp_asc(const void *a, const void *b)
{
	return memcmp(a, b, 4);
}

static int gfar_comp_desc(const void *a, const void *b)
{
	return -memcmp(a, b, 4);
}

static void gfar_swap(void *a, void *b, int size)
{
	u32 *_a = a;
	u32 *_b = b;

	swap(_a[0], _b[0]);
	swap(_a[1], _b[1]);
	swap(_a[2], _b[2]);
	swap(_a[3], _b[3]);
}

/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
	tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	tab->fe[tab->index].prop = mask;
	tab->index++;
}

/* Sets parse bits (e.g. IP or TCP) */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
				   RQFCR_AND;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
				       struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

/* For setting a tuple of value and mask of type flag
 * Example:
 * IP-Src = 10.0.0.0/255.0.0.0
 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 *
 * Ethtool gives us a value=0 and mask=~0 for a don't-care tuple.
 * For a don't-care mask it gives us a 0.
 *
 * The check if don't care and the mask adjustment if mask=0 is done for VLAN
 * and MAC stuff on an upper level (due to missing information on this level).
 * For these we can discard them if they are value=0 and mask=0.
 *
 * Further, all masks are one-padded for better hardware efficiency.
 */
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
			       struct filer_table *tab)
{
	switch (flag) {
	/* 3bit */
	case RQFCR_PID_PRI:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_PRI_MASK;
		break;
	/* 8bit */
	case RQFCR_PID_L4P:
	case RQFCR_PID_TOS:
		if (!~(mask | RQFCR_PID_L4P_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_L4P_MASK;
		break;
	/* 12bit */
	case RQFCR_PID_VID:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_VID_MASK;
		break;
	/* 16bit */
	case RQFCR_PID_DPT:
	case RQFCR_PID_SPT:
	case RQFCR_PID_ETY:
		if (!~(mask | RQFCR_PID_PORT_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_PORT_MASK;
		break;
	/* 24bit */
	case RQFCR_PID_DAH:
	case RQFCR_PID_DAL:
	case RQFCR_PID_SAH:
	case RQFCR_PID_SAL:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_MAC_MASK;
		break;
	/* for all real 32bit masks */
	default:
		if (!~mask)
			return;
		if (!mask)
			mask = ~0;
		break;
	}
	gfar_set_general_attribute(value, mask, flag, tab);
}

/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
			      struct ethtool_tcpip4_spec *mask,
			      struct filer_table *tab)
{
	gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
	gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
	gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
	gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}

/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
			     struct ethtool_usrip4_spec *mask,
			     struct filer_table *tab)
{
	gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
	gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
	gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
	gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
			   tab);

}

/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
			   struct filer_table *tab)
{
	u32 upper_temp_mask = 0;
	u32 lower_temp_mask = 0;

	/* Source address */
	if (!is_broadcast_ether_addr(mask->h_source)) {
		if (is_zero_ether_addr(mask->h_source)) {
			upper_temp_mask = 0xFFFFFFFF;
			lower_temp_mask = 0xFFFFFFFF;
		} else {
			upper_temp_mask = mask->h_source[0] << 16 |
					  mask->h_source[1] << 8  |
					  mask->h_source[2];
			lower_temp_mask = mask->h_source[3] << 16 |
					  mask->h_source[4] << 8  |
					  mask->h_source[5];
		}
		/* Upper 24bit */
		gfar_set_attribute(value->h_source[0] << 16 |
				   value->h_source[1] << 8  |
				   value->h_source[2],
				   upper_temp_mask, RQFCR_PID_SAH, tab);
		/* And the same for the lower part */
		gfar_set_attribute(value->h_source[3] << 16 |
				   value->h_source[4] << 8  |
				   value->h_source[5],
				   lower_temp_mask, RQFCR_PID_SAL, tab);
	}
	/* Destination address */
	if (!is_broadcast_ether_addr(mask->h_dest)) {
		/* Special for destination is limited broadcast */
		if ((is_broadcast_ether_addr(value->h_dest) &&
		     is_zero_ether_addr(mask->h_dest))) {
			gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
		} else {
			if (is_zero_ether_addr(mask->h_dest)) {
				upper_temp_mask = 0xFFFFFFFF;
				lower_temp_mask = 0xFFFFFFFF;
			} else {
				upper_temp_mask = mask->h_dest[0] << 16 |
						  mask->h_dest[1] << 8  |
						  mask->h_dest[2];
				lower_temp_mask = mask->h_dest[3] << 16 |
						  mask->h_dest[4] << 8  |
						  mask->h_dest[5];
			}

			/* Upper 24bit */
			gfar_set_attribute(value->h_dest[0] << 16 |
					   value->h_dest[1] << 8  |
					   value->h_dest[2],
					   upper_temp_mask, RQFCR_PID_DAH, tab);
			/* And the same for the lower part */
			gfar_set_attribute(value->h_dest[3] << 16 |
					   value->h_dest[4] << 8  |
					   value->h_dest[5],
					   lower_temp_mask, RQFCR_PID_DAL, tab);
		}
	}

	gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
}

/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
				 struct filer_table *tab)
{
	u32 vlan = 0, vlan_mask = 0;
	u32 id = 0, id_mask = 0;
	u32 cfi = 0, cfi_mask = 0;
	u32 prio = 0, prio_mask = 0;
	u32 old_index = tab->index;

	/* Check if vlan is wanted */
	if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
		if (!rule->m_ext.vlan_tci)
			rule->m_ext.vlan_tci = 0xFFFF;

		vlan = RQFPR_VLN;
		vlan_mask = RQFPR_VLN;

		/* Separate the fields */
		id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
		id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
		cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
		cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
		prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
		       VLAN_PRIO_SHIFT;
		prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
			    VLAN_PRIO_SHIFT;

		if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
			vlan |= RQFPR_CFI;
			vlan_mask |= RQFPR_CFI;
		} else if (cfi != VLAN_TAG_PRESENT &&
			   cfi_mask == VLAN_TAG_PRESENT) {
			vlan_mask |= RQFPR_CFI;
		}
	}

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
				    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
				  &rule->m_u.tcp_ip4_spec, tab);
		break;
	case UDP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
				    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
				  &rule->m_u.udp_ip4_spec, tab);
		break;
	case SCTP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
		gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
				  (struct ethtool_tcpip4_spec *)&rule->m_u,
				  tab);
		break;
	case IP_USER_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
				 (struct ethtool_usrip4_spec *) &rule->m_u,
				 tab);
		break;
	case ETHER_FLOW:
		if (vlan)
			gfar_set_parse_bits(vlan, vlan_mask, tab);
		gfar_set_ether((struct ethhdr *) &rule->h_u,
			       (struct ethhdr *) &rule->m_u, tab);
		break;
	default:
		return -1;
	}

	/* Set the vlan attributes in the end */
	if (vlan) {
		gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
		gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
	}

	/* If there has been nothing written till now, it must be a default */
	if (tab->index == old_index) {
		gfar_set_mask(0xFFFFFFFF, tab);
		tab->fe[tab->index].ctrl = 0x20;
		tab->fe[tab->index].prop = 0x0;
		tab->index++;
	}

	/* Remove last AND */
	tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

	/* Specify which queue to use or to drop */
	if (rule->ring_cookie == RX_CLS_FLOW_DISC)
		tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
	else
		tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

	/* Only big enough entries can be clustered */
	if (tab->index > (old_index + 2)) {
		tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
		tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
	}

	/* In rare cases the cache can be full while there is
	 * free space in hw
	 */
	if (tab->index > MAX_FILER_CACHE_IDX - 1)
		return -EBUSY;

	return 0;
}

/* Copy size filer entries */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
				    struct gfar_filer_entry src[0], s32 size)
{
	while (size > 0) {
		size--;
		dst[size].ctrl = src[size].ctrl;
		dst[size].prop = src[size].prop;
	}
}

/* Delete the contents of the filer-table between start and end
 * and collapse them
 */
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
	int length;

	if (end > MAX_FILER_CACHE_IDX || end < begin)
		return -EINVAL;

	end++;
	length = end - begin;

	/* Copy */
	while (end < tab->index) {
		tab->fe[begin].ctrl = tab->fe[end].ctrl;
		tab->fe[begin++].prop = tab->fe[end++].prop;
	}
	/* Fill up with don't cares */
	while (begin < tab->index) {
		tab->fe[begin].ctrl = 0x60;
		tab->fe[begin].prop = 0xFFFFFFFF;
		begin++;
	}

	tab->index -= length;
	return 0;
}

/* Make space on the wanted location */
static int gfar_expand_filer_entries(u32 begin, u32 length,
				     struct filer_table *tab)
{
	if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
	    begin > MAX_FILER_CACHE_IDX)
		return -EINVAL;

	gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
				tab->index - length + 1);

	tab->index += length;
	return 0;
}

static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
{
	for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
	     start++) {
		if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
		    (RQFCR_AND | RQFCR_CLE))
			return start;
	}
	return -1;
}

static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
{
	for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
	     start++) {
		if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
		    (RQFCR_CLE))
			return start;
	}
	return -1;
}

/* Uses the hardware's clustering option to reduce
 * the number of filer table entries
 */
static void gfar_cluster_filer(struct filer_table *tab)
{
	s32 i = -1, j, iend, jend;

	while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
		j = i;
		while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
			/* The cluster entries self and the previous one
			 * (a mask) must be identical!
			 */
			if (tab->fe[i].ctrl != tab->fe[j].ctrl)
				break;
			if (tab->fe[i].prop != tab->fe[j].prop)
				break;
			if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
				break;
			if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
				break;
			iend = gfar_get_next_cluster_end(i, tab);
			jend = gfar_get_next_cluster_end(j, tab);
			if (jend == -1 || iend == -1)
				break;

			/* First we make some free space, where our cluster
			 * element should be. Then we copy it there and finally
			 * delete it from its old location.
			 */
			if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
			    -EINVAL)
				break;

			gfar_copy_filer_entries(&(tab->fe[iend + 1]),
						&(tab->fe[jend + 1]), jend - j);

			if (gfar_trim_filer_entries(jend - 1,
						    jend + (jend - j),
						    tab) == -EINVAL)
				return;

			/* Mask out cluster bit */
			tab->fe[iend].ctrl &= ~(RQFCR_CLE);
		}
	}
}

/* Swaps the masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_bits(struct gfar_filer_entry *a1,
			   struct gfar_filer_entry *a2,
			   struct gfar_filer_entry *b1,
			   struct gfar_filer_entry *b2, u32 mask)
{
	u32 temp[4];
	temp[0] = a1->ctrl & mask;
	temp[1] = a2->ctrl & mask;
	temp[2] = b1->ctrl & mask;
	temp[3] = b2->ctrl & mask;

	a1->ctrl &= ~mask;
	a2->ctrl &= ~mask;
	b1->ctrl &= ~mask;
	b2->ctrl &= ~mask;

	a1->ctrl |= temp[1];
	a2->ctrl |= temp[0];
	b1->ctrl |= temp[3];
	b2->ctrl |= temp[2];
}

/* Generate a list consisting of masks values with their start and
 * end of validity and block as indicator for parts belonging
 * together (glued by ANDs) in mask_table
 */
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
				    struct filer_table *tab)
{
	u32 i, and_index = 0, block_index = 1;

	for (i = 0; i < tab->index; i++) {

		/* LSByte of control = 0 sets a mask */
		if (!(tab->fe[i].ctrl & 0xF)) {
			mask_table[and_index].mask = tab->fe[i].prop;
			mask_table[and_index].start = i;
			mask_table[and_index].block = block_index;
			if (and_index >= 1)
				mask_table[and_index - 1].end = i - 1;
			and_index++;
		}
		/* cluster starts and ends will be separated because they should
		 * hold their position
		 */
		if (tab->fe[i].ctrl & RQFCR_CLE)
			block_index++;
		/* An unset AND bit indicates the end of a dependent block */
		if (!(tab->fe[i].ctrl & RQFCR_AND))
			block_index++;
	}

	mask_table[and_index - 1].end = i - 1;

	return and_index;
}

/* Sorts the entries of mask_table by the values of the masks.
 * Important: The 0xFF80 flags of the first and last entry of a
 * block must hold their position (which queue, CLusterEnable, ReJEct,
 * AND)
 */
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
				 struct filer_table *temp_table, u32 and_index)
{
	/* Pointer to compare function (_asc or _desc) */
	int (*gfar_comp)(const void *, const void *);

	u32 i, size = 0, start = 0, prev = 1;
	u32 old_first, old_last, new_first, new_last;

	gfar_comp = &gfar_comp_desc;

	for (i = 0; i < and_index; i++) {
		if (prev != mask_table[i].block) {
			old_first = mask_table[start].start + 1;
			old_last = mask_table[i - 1].end;
			sort(mask_table + start, size,
			     sizeof(struct gfar_mask_entry),
			     gfar_comp, &gfar_swap);

			/* Toggle order for every block. This makes the
			 * thing more efficient!
			 */
			if (gfar_comp == gfar_comp_desc)
				gfar_comp = &gfar_comp_asc;
			else
				gfar_comp = &gfar_comp_desc;

			new_first = mask_table[start].start + 1;
			new_last = mask_table[i - 1].end;

			gfar_swap_bits(&temp_table->fe[new_first],
				       &temp_table->fe[old_first],
				       &temp_table->fe[new_last],
				       &temp_table->fe[old_last],
				       RQFCR_QUEUE | RQFCR_CLE |
				       RQFCR_RJE | RQFCR_AND);

			start = i;
			size = 0;
		}
		size++;
		prev = mask_table[i].block;
	}
}

/* Reduces the number of masks needed in the filer table to save entries
 * This is done by sorting the masks of a dependent block. A dependent block is
 * identified by gluing ANDs or CLE. The sorting order toggles after every
 * block. Of course entries in scope of a mask must change their location with
 * it.
 */
static int gfar_optimize_filer_masks(struct filer_table *tab)
{
	struct filer_table *temp_table;
	struct gfar_mask_entry *mask_table;

	u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
	s32 ret = 0;

	/* We need a copy of the filer table because
	 * we want to change its order
	 */
	temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
	if (temp_table == NULL)
		return -ENOMEM;

	mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
			     sizeof(struct gfar_mask_entry), GFP_KERNEL);

	if (mask_table == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	and_index = gfar_generate_mask_table(mask_table, tab);

	gfar_sort_mask_table(mask_table, temp_table, and_index);

	/* Now we can copy the data from our duplicated filer table to
	 * the real one in the order the mask table says
	 */
	for (i = 0; i < and_index; i++) {
		size = mask_table[i].end - mask_table[i].start + 1;
		gfar_copy_filer_entries(&(tab->fe[j]),
					&(temp_table->fe[mask_table[i].start]),
					size);
		j += size;
	}

	/* And finally we just have to check for duplicated masks and drop the
	 * second ones
	 */
	for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			previous_mask = i++;
			break;
		}
	}
	for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
				/* Two identical ones found!
				 * So drop the second one!
				 */
				gfar_trim_filer_entries(i, i, tab);
			} else
				/* Not identical! */
				previous_mask = i;
		}
	}

	kfree(mask_table);
end:	kfree(temp_table);
	return ret;
}

/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
				  struct filer_table *tab)
{
	u32 i = 0;
	if (tab->index > MAX_FILER_IDX - 1)
		return -EBUSY;

	/* Avoid inconsistent filer table to be processed */
	lock_rx_qs(priv);

	/* Fill regular entries */
	for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
	     i++)
		gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
	/* Fill the rest with fall-throughs */
	for (; i < MAX_FILER_IDX - 1; i++)
		gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
	/* Last entry must be default accept
	 * because that's what people expect
	 */
	gfar_write_filer(priv, i, 0x20, 0x0);

	unlock_rx_qs(priv);

	return 0;
}

static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
				 struct gfar_private *priv)
{

	if (flow->flow_type & FLOW_EXT) {
		if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
			netdev_warn(priv->ndev,
				    "User-specific data not supported!\n");
		if (~flow->m_ext.vlan_etype)
			netdev_warn(priv->ndev,
				    "VLAN-etype not supported!\n");
	}
	if (flow->flow_type == IP_USER_FLOW)
		if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
			netdev_warn(priv->ndev,
				    "IP-Version differing from IPv4 not supported!\n");

	return 0;
}

static int gfar_process_filer_changes(struct gfar_private *priv)
{
	struct ethtool_flow_spec_container *j;
	struct filer_table *tab;
	s32 i = 0;
	s32 ret = 0;

	/* So index is set to zero, too! */
	tab = kzalloc(sizeof(*tab), GFP_KERNEL);
	if (tab == NULL)
		return -ENOMEM;

	/* Now convert the existing filer data from flow_spec into
	 * filer tables binary format
	 */
	list_for_each_entry(j, &priv->rx_list.list, list) {
		ret = gfar_convert_to_filer(&j->fs, tab);
		if (ret == -EBUSY) {
			netdev_err(priv->ndev,
				   "Rule not added: No free space!\n");
			goto end;
		}
		if (ret == -1) {
			netdev_err(priv->ndev,
				   "Rule not added: Unsupported Flow-type!\n");
			goto end;
		}
	}

	i = tab->index;

	/* Optimizations to save entries */
	gfar_cluster_filer(tab);
	gfar_optimize_filer_masks(tab);

	pr_debug("\tSummary:\n"
		 "\tData on hardware: %d\n"
		 "\tCompression rate: %d%%\n",
		 tab->index, 100 - (100 * tab->index) / i);

	/* Write everything to hardware */
	ret = gfar_write_filer_table(priv, tab);
	if (ret == -EBUSY) {
		netdev_err(priv->ndev, "Rule not added: No free space!\n");
		goto end;
	}

end:
	kfree(tab);
	return ret;
}

static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	u32 i = 0;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xFF;

	flow->m_ext.vlan_etype ^= 0xFFFF;
	flow->m_ext.vlan_tci ^= 0xFFFF;
	flow->m_ext.data[0] ^= ~0;
	flow->m_ext.data[1] ^= ~0;
}

static int gfar_add_cls(struct gfar_private *priv,
			struct ethtool_rx_flow_spec *flow)
{
	struct ethtool_flow_spec_container *temp, *comp;
	int ret = 0;

	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
	if (temp == NULL)
		return -ENOMEM;
	memcpy(&temp->fs, flow, sizeof(temp->fs));

	gfar_invert_masks(&temp->fs);
	ret = gfar_check_capability(&temp->fs, priv);
	if (ret)
		goto clean_mem;
	/* Link in the new element at the right @location */
	if (list_empty(&priv->rx_list.list)) {
		ret = gfar_check_filer_hardware(priv);
		if (ret != 0)
			goto clean_mem;
		list_add(&temp->list, &priv->rx_list.list);
		goto process;
	} else {
		list_for_each_entry(comp, &priv->rx_list.list, list) {
			if (comp->fs.location > flow->location) {
				list_add_tail(&temp->list, &comp->list);
				goto process;
			}
			if (comp->fs.location == flow->location) {
				netdev_err(priv->ndev,
					   "Rule not added: ID %d not free!\n",
					   flow->location);
				ret = -EBUSY;
				goto clean_mem;
			}
		}
		list_add_tail(&temp->list, &priv->rx_list.list);
	}

process:
	ret = gfar_process_filer_changes(priv);
	if (ret)
		goto clean_list;
	priv->rx_list.count++;
	return ret;

clean_list:
	list_del(&temp->list);
clean_mem:
	kfree(temp);
	return ret;
}

static int gfar_del_cls(struct gfar_private *priv, u32 loc)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	if (list_empty(&priv->rx_list.list))
		return ret;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == loc) {
			list_del(&comp->list);
			kfree(comp);
			priv->rx_list.count--;
			gfar_process_filer_changes(priv);
			ret = 0;
			break;
		}
	}

	return ret;
}

static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
			gfar_invert_masks(&cmd->fs);
			ret = 0;
			break;
		}
	}

	return ret;
}

static int gfar_get_cls_all(struct gfar_private *priv,
			    struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct ethtool_flow_spec_container *comp;
	u32 i = 0;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (i == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[i] = comp->fs.location;
		i++;
	}

	cmd->data = MAX_FILER_IDX;
	cmd->rule_cnt = i;

	return 0;
}

static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	mutex_lock(&priv->rx_queue_access);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = gfar_set_hash_opts(priv, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
		     cmd->fs.ring_cookie >= priv->num_rx_queues) ||
		    cmd->fs.location >= MAX_FILER_IDX) {
			ret = -EINVAL;
			break;
		}
		ret = gfar_add_cls(priv, &cmd->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gfar_del_cls(priv, cmd->fs.location);
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->rx_queue_access);

	return ret;
}

static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			u32 *rule_locs)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->num_rx_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = priv->rx_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gfar_get_cls(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gfar_get_cls_all(priv, cmd, rule_locs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

int gfar_phc_index = -1;
EXPORT_SYMBOL(gfar_phc_index);

static int gfar_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
		return 0;
	}
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = gfar_phc_index;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}
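
/* Usage note (illustrative, not part of the original driver): a
 * classification rule such as
 *	ethtool -N ethX flow-type udp4 dst-port 319 action 1
 * reaches gfar_set_nfc() above as ETHTOOL_SRXCLSRLINS and is translated
 * into filer entries by gfar_add_cls() and gfar_process_filer_changes().
 */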
const struct ethtool_ops gfar_ethtool_ops = {
	.get_settings = gfar_gsettings,
	.set_settings = gfar_ssettings,
	.get_drvinfo = gfar_gdrvinfo,
	.get_regs_len = gfar_reglen,
	.get_regs = gfar_get_regs,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gfar_gcoalesce,
	.set_coalesce = gfar_scoalesce,
	.get_ringparam = gfar_gringparam,
	.set_ringparam = gfar_sringparam,
	.get_strings = gfar_gstrings,
	.get_sset_count = gfar_sset_count,
	.get_ethtool_stats = gfar_fill_stats,
	.get_msglevel = gfar_get_msglevel,
	.set_msglevel = gfar_set_msglevel,
#ifdef CONFIG_PM
	.get_wol = gfar_get_wol,
	.set_wol = gfar_set_wol,
#endif
	.set_rxnfc = gfar_set_nfc,
	.get_rxnfc = gfar_get_nfc,
	.get_ts_info = gfar_get_ts_info,
};