1 /* 2 * New driver for Marvell Yukon chipset and SysKonnect Gigabit 3 * Ethernet adapters. Based on earlier sk98lin, e100 and 4 * FreeBSD if_sk drivers. 5 * 6 * This driver intentionally does not support all the features 7 * of the original driver such as link fail-over and link management because 8 * those should be done at higher levels. 9 * 10 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org> 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2 of the License. 15 * 16 * This program is distributed in the hope that it will be useful, 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * GNU General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, write to the Free Software 23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 24 */ 25 26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 27 28 #include <linux/in.h> 29 #include <linux/kernel.h> 30 #include <linux/module.h> 31 #include <linux/moduleparam.h> 32 #include <linux/netdevice.h> 33 #include <linux/etherdevice.h> 34 #include <linux/ethtool.h> 35 #include <linux/pci.h> 36 #include <linux/if_vlan.h> 37 #include <linux/ip.h> 38 #include <linux/delay.h> 39 #include <linux/crc32.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/debugfs.h> 42 #include <linux/sched.h> 43 #include <linux/seq_file.h> 44 #include <linux/mii.h> 45 #include <linux/slab.h> 46 #include <linux/dmi.h> 47 #include <linux/prefetch.h> 48 #include <asm/irq.h> 49 50 #include "skge.h" 51 52 #define DRV_NAME "skge" 53 #define DRV_VERSION "1.14" 54 55 #define DEFAULT_TX_RING_SIZE 128 56 #define DEFAULT_RX_RING_SIZE 512 57 #define MAX_TX_RING_SIZE 1024 58 #define TX_LOW_WATER (MAX_SKB_FRAGS + 1) 59 #define MAX_RX_RING_SIZE 4096 60 #define RX_COPY_THRESHOLD 128 61 #define RX_BUF_SIZE 1536 62 #define PHY_RETRIES 1000 63 #define ETH_JUMBO_MTU 9000 64 #define TX_WATCHDOG (5 * HZ) 65 #define NAPI_WEIGHT 64 66 #define BLINK_MS 250 67 #define LINK_HZ HZ 68 69 #define SKGE_EEPROM_MAGIC 0x9933aabb 70 71 72 MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); 73 MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); 74 MODULE_LICENSE("GPL"); 75 MODULE_VERSION(DRV_VERSION); 76 77 static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 78 NETIF_MSG_LINK | NETIF_MSG_IFUP | 79 NETIF_MSG_IFDOWN); 80 81 static int debug = -1; /* defaults above */ 82 module_param(debug, int, 0); 83 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 84 85 static const struct pci_device_id skge_id_table[] = { 86 { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) }, /* 3Com 3C940 */ 87 { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) }, /* 3Com 3C940B */ 88 #ifdef CONFIG_SKGE_GENESIS 89 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */ 90 #endif 91 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */ 92 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */ 93 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */ 94 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */ 95 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */ 96 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ 97 { PCI_DEVICE(PCI_VENDOR_ID_CNET, 
0x434E) }, /* CNet PowerG-2000 */ 98 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) }, /* Linksys EG1064 v2 */ 99 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */ 100 { 0 } 101 }; 102 MODULE_DEVICE_TABLE(pci, skge_id_table); 103 104 static int skge_up(struct net_device *dev); 105 static int skge_down(struct net_device *dev); 106 static void skge_phy_reset(struct skge_port *skge); 107 static void skge_tx_clean(struct net_device *dev); 108 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); 109 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); 110 static void genesis_get_stats(struct skge_port *skge, u64 *data); 111 static void yukon_get_stats(struct skge_port *skge, u64 *data); 112 static void yukon_init(struct skge_hw *hw, int port); 113 static void genesis_mac_init(struct skge_hw *hw, int port); 114 static void genesis_link_up(struct skge_port *skge); 115 static void skge_set_multicast(struct net_device *dev); 116 static irqreturn_t skge_intr(int irq, void *dev_id); 117 118 /* Avoid conditionals by using array */ 119 static const int txqaddr[] = { Q_XA1, Q_XA2 }; 120 static const int rxqaddr[] = { Q_R1, Q_R2 }; 121 static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; 122 static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; 123 static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F }; 124 static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 }; 125 126 static inline bool is_genesis(const struct skge_hw *hw) 127 { 128 #ifdef CONFIG_SKGE_GENESIS 129 return hw->chip_id == CHIP_ID_GENESIS; 130 #else 131 return false; 132 #endif 133 } 134 135 static int skge_get_regs_len(struct net_device *dev) 136 { 137 return 0x4000; 138 } 139 140 /* 141 * Returns copy of whole control register region 142 * Note: skip RAM address register because accessing it will 143 * cause bus hangs! 
144 */ 145 static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, 146 void *p) 147 { 148 const struct skge_port *skge = netdev_priv(dev); 149 const void __iomem *io = skge->hw->regs; 150 151 regs->version = 1; 152 memset(p, 0, regs->len); 153 memcpy_fromio(p, io, B3_RAM_ADDR); 154 155 if (regs->len > B3_RI_WTO_R1) { 156 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, 157 regs->len - B3_RI_WTO_R1); 158 } 159 } 160 161 /* Wake on Lan only supported on Yukon chips with rev 1 or above */ 162 static u32 wol_supported(const struct skge_hw *hw) 163 { 164 if (is_genesis(hw)) 165 return 0; 166 167 if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) 168 return 0; 169 170 return WAKE_MAGIC | WAKE_PHY; 171 } 172 173 static void skge_wol_init(struct skge_port *skge) 174 { 175 struct skge_hw *hw = skge->hw; 176 int port = skge->port; 177 u16 ctrl; 178 179 skge_write16(hw, B0_CTST, CS_RST_CLR); 180 skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); 181 182 /* Turn on Vaux */ 183 skge_write8(hw, B0_POWER_CTRL, 184 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); 185 186 /* WA code for COMA mode -- clear PHY reset */ 187 if (hw->chip_id == CHIP_ID_YUKON_LITE && 188 hw->chip_rev >= CHIP_REV_YU_LITE_A3) { 189 u32 reg = skge_read32(hw, B2_GP_IO); 190 reg |= GP_DIR_9; 191 reg &= ~GP_IO_9; 192 skge_write32(hw, B2_GP_IO, reg); 193 } 194 195 skge_write32(hw, SK_REG(port, GPHY_CTRL), 196 GPC_DIS_SLEEP | 197 GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | 198 GPC_ANEG_1 | GPC_RST_SET); 199 200 skge_write32(hw, SK_REG(port, GPHY_CTRL), 201 GPC_DIS_SLEEP | 202 GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | 203 GPC_ANEG_1 | GPC_RST_CLR); 204 205 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); 206 207 /* Force to 10/100 skge_reset will re-enable on resume */ 208 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, 209 (PHY_AN_100FULL | PHY_AN_100HALF | 210 PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA)); 211 /* no 1000 HD/FD */ 212 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0); 213 gm_phy_write(hw, port, PHY_MARV_CTRL, 214 PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE | 215 PHY_CT_RE_CFG | PHY_CT_DUP_MD); 216 217 218 /* Set GMAC to no flow control and auto update for speed/duplex */ 219 gma_write16(hw, port, GM_GP_CTRL, 220 GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| 221 GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); 222 223 /* Set WOL address */ 224 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), 225 skge->netdev->dev_addr, ETH_ALEN); 226 227 /* Turn on appropriate WOL control bits */ 228 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT); 229 ctrl = 0; 230 if (skge->wol & WAKE_PHY) 231 ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; 232 else 233 ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; 234 235 if (skge->wol & WAKE_MAGIC) 236 ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; 237 else 238 ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; 239 240 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; 241 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); 242 243 /* block receiver */ 244 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 245 } 246 247 static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 248 { 249 struct skge_port *skge = netdev_priv(dev); 250 251 wol->supported = wol_supported(skge->hw); 252 wol->wolopts = skge->wol; 253 } 254 255 static int skge_set_wol(struct net_device *dev, struct 
ethtool_wolinfo *wol) 256 { 257 struct skge_port *skge = netdev_priv(dev); 258 struct skge_hw *hw = skge->hw; 259 260 if ((wol->wolopts & ~wol_supported(hw)) || 261 !device_can_wakeup(&hw->pdev->dev)) 262 return -EOPNOTSUPP; 263 264 skge->wol = wol->wolopts; 265 266 device_set_wakeup_enable(&hw->pdev->dev, skge->wol); 267 268 return 0; 269 } 270 271 /* Determine supported/advertised modes based on hardware. 272 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx 273 */ 274 static u32 skge_supported_modes(const struct skge_hw *hw) 275 { 276 u32 supported; 277 278 if (hw->copper) { 279 supported = (SUPPORTED_10baseT_Half | 280 SUPPORTED_10baseT_Full | 281 SUPPORTED_100baseT_Half | 282 SUPPORTED_100baseT_Full | 283 SUPPORTED_1000baseT_Half | 284 SUPPORTED_1000baseT_Full | 285 SUPPORTED_Autoneg | 286 SUPPORTED_TP); 287 288 if (is_genesis(hw)) 289 supported &= ~(SUPPORTED_10baseT_Half | 290 SUPPORTED_10baseT_Full | 291 SUPPORTED_100baseT_Half | 292 SUPPORTED_100baseT_Full); 293 294 else if (hw->chip_id == CHIP_ID_YUKON) 295 supported &= ~SUPPORTED_1000baseT_Half; 296 } else 297 supported = (SUPPORTED_1000baseT_Full | 298 SUPPORTED_1000baseT_Half | 299 SUPPORTED_FIBRE | 300 SUPPORTED_Autoneg); 301 302 return supported; 303 } 304 305 static int skge_get_link_ksettings(struct net_device *dev, 306 struct ethtool_link_ksettings *cmd) 307 { 308 struct skge_port *skge = netdev_priv(dev); 309 struct skge_hw *hw = skge->hw; 310 u32 supported, advertising; 311 312 supported = skge_supported_modes(hw); 313 314 if (hw->copper) { 315 cmd->base.port = PORT_TP; 316 cmd->base.phy_address = hw->phy_addr; 317 } else 318 cmd->base.port = PORT_FIBRE; 319 320 advertising = skge->advertising; 321 cmd->base.autoneg = skge->autoneg; 322 cmd->base.speed = skge->speed; 323 cmd->base.duplex = skge->duplex; 324 325 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 326 supported); 327 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 328 advertising); 329 330 return 0; 331 } 332 333 static int skge_set_link_ksettings(struct net_device *dev, 334 const struct ethtool_link_ksettings *cmd) 335 { 336 struct skge_port *skge = netdev_priv(dev); 337 const struct skge_hw *hw = skge->hw; 338 u32 supported = skge_supported_modes(hw); 339 int err = 0; 340 u32 advertising; 341 342 ethtool_convert_link_mode_to_legacy_u32(&advertising, 343 cmd->link_modes.advertising); 344 345 if (cmd->base.autoneg == AUTONEG_ENABLE) { 346 advertising = supported; 347 skge->duplex = -1; 348 skge->speed = -1; 349 } else { 350 u32 setting; 351 u32 speed = cmd->base.speed; 352 353 switch (speed) { 354 case SPEED_1000: 355 if (cmd->base.duplex == DUPLEX_FULL) 356 setting = SUPPORTED_1000baseT_Full; 357 else if (cmd->base.duplex == DUPLEX_HALF) 358 setting = SUPPORTED_1000baseT_Half; 359 else 360 return -EINVAL; 361 break; 362 case SPEED_100: 363 if (cmd->base.duplex == DUPLEX_FULL) 364 setting = SUPPORTED_100baseT_Full; 365 else if (cmd->base.duplex == DUPLEX_HALF) 366 setting = SUPPORTED_100baseT_Half; 367 else 368 return -EINVAL; 369 break; 370 371 case SPEED_10: 372 if (cmd->base.duplex == DUPLEX_FULL) 373 setting = SUPPORTED_10baseT_Full; 374 else if (cmd->base.duplex == DUPLEX_HALF) 375 setting = SUPPORTED_10baseT_Half; 376 else 377 return -EINVAL; 378 break; 379 default: 380 return -EINVAL; 381 } 382 383 if ((setting & supported) == 0) 384 return -EINVAL; 385 386 skge->speed = speed; 387 skge->duplex = cmd->base.duplex; 388 } 389 390 skge->autoneg = cmd->base.autoneg; 391 skge->advertising = advertising; 392 393 if 
(netif_running(dev)) { 394 skge_down(dev); 395 err = skge_up(dev); 396 if (err) { 397 dev_close(dev); 398 return err; 399 } 400 } 401 402 return 0; 403 } 404 405 static void skge_get_drvinfo(struct net_device *dev, 406 struct ethtool_drvinfo *info) 407 { 408 struct skge_port *skge = netdev_priv(dev); 409 410 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 411 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 412 strlcpy(info->bus_info, pci_name(skge->hw->pdev), 413 sizeof(info->bus_info)); 414 } 415 416 static const struct skge_stat { 417 char name[ETH_GSTRING_LEN]; 418 u16 xmac_offset; 419 u16 gma_offset; 420 } skge_stats[] = { 421 { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI }, 422 { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI }, 423 424 { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK }, 425 { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK }, 426 { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK }, 427 { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK }, 428 { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK }, 429 { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK }, 430 { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE }, 431 { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE }, 432 433 { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL }, 434 { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL }, 435 { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL }, 436 { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL }, 437 { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR }, 438 { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV }, 439 440 { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, 441 { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT }, 442 { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG }, 443 { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, 444 { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR }, 445 }; 446 447 static int skge_get_sset_count(struct net_device *dev, int sset) 448 { 449 switch (sset) { 450 case ETH_SS_STATS: 451 return ARRAY_SIZE(skge_stats); 452 default: 453 return -EOPNOTSUPP; 454 } 455 } 456 457 static void skge_get_ethtool_stats(struct net_device *dev, 458 struct ethtool_stats *stats, u64 *data) 459 { 460 struct skge_port *skge = netdev_priv(dev); 461 462 if (is_genesis(skge->hw)) 463 genesis_get_stats(skge, data); 464 else 465 yukon_get_stats(skge, data); 466 } 467 468 /* Use hardware MIB variables for critical path statistics and 469 * transmit feedback not reported at interrupt. 470 * Other errors are accounted for in interrupt handler. 
471 */ 472 static struct net_device_stats *skge_get_stats(struct net_device *dev) 473 { 474 struct skge_port *skge = netdev_priv(dev); 475 u64 data[ARRAY_SIZE(skge_stats)]; 476 477 if (is_genesis(skge->hw)) 478 genesis_get_stats(skge, data); 479 else 480 yukon_get_stats(skge, data); 481 482 dev->stats.tx_bytes = data[0]; 483 dev->stats.rx_bytes = data[1]; 484 dev->stats.tx_packets = data[2] + data[4] + data[6]; 485 dev->stats.rx_packets = data[3] + data[5] + data[7]; 486 dev->stats.multicast = data[3] + data[5]; 487 dev->stats.collisions = data[10]; 488 dev->stats.tx_aborted_errors = data[12]; 489 490 return &dev->stats; 491 } 492 493 static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) 494 { 495 int i; 496 497 switch (stringset) { 498 case ETH_SS_STATS: 499 for (i = 0; i < ARRAY_SIZE(skge_stats); i++) 500 memcpy(data + i * ETH_GSTRING_LEN, 501 skge_stats[i].name, ETH_GSTRING_LEN); 502 break; 503 } 504 } 505 506 static void skge_get_ring_param(struct net_device *dev, 507 struct ethtool_ringparam *p) 508 { 509 struct skge_port *skge = netdev_priv(dev); 510 511 p->rx_max_pending = MAX_RX_RING_SIZE; 512 p->tx_max_pending = MAX_TX_RING_SIZE; 513 514 p->rx_pending = skge->rx_ring.count; 515 p->tx_pending = skge->tx_ring.count; 516 } 517 518 static int skge_set_ring_param(struct net_device *dev, 519 struct ethtool_ringparam *p) 520 { 521 struct skge_port *skge = netdev_priv(dev); 522 int err = 0; 523 524 if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || 525 p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE) 526 return -EINVAL; 527 528 skge->rx_ring.count = p->rx_pending; 529 skge->tx_ring.count = p->tx_pending; 530 531 if (netif_running(dev)) { 532 skge_down(dev); 533 err = skge_up(dev); 534 if (err) 535 dev_close(dev); 536 } 537 538 return err; 539 } 540 541 static u32 skge_get_msglevel(struct net_device *netdev) 542 { 543 struct skge_port *skge = netdev_priv(netdev); 544 return skge->msg_enable; 545 } 546 547 static void skge_set_msglevel(struct net_device *netdev, u32 value) 548 { 549 struct skge_port *skge = netdev_priv(netdev); 550 skge->msg_enable = value; 551 } 552 553 static int skge_nway_reset(struct net_device *dev) 554 { 555 struct skge_port *skge = netdev_priv(dev); 556 557 if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev)) 558 return -EINVAL; 559 560 skge_phy_reset(skge); 561 return 0; 562 } 563 564 static void skge_get_pauseparam(struct net_device *dev, 565 struct ethtool_pauseparam *ecmd) 566 { 567 struct skge_port *skge = netdev_priv(dev); 568 569 ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) || 570 (skge->flow_control == FLOW_MODE_SYM_OR_REM)); 571 ecmd->tx_pause = (ecmd->rx_pause || 572 (skge->flow_control == FLOW_MODE_LOC_SEND)); 573 574 ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause; 575 } 576 577 static int skge_set_pauseparam(struct net_device *dev, 578 struct ethtool_pauseparam *ecmd) 579 { 580 struct skge_port *skge = netdev_priv(dev); 581 struct ethtool_pauseparam old; 582 int err = 0; 583 584 skge_get_pauseparam(dev, &old); 585 586 if (ecmd->autoneg != old.autoneg) 587 skge->flow_control = ecmd->autoneg ? 
FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC; 588 else { 589 if (ecmd->rx_pause && ecmd->tx_pause) 590 skge->flow_control = FLOW_MODE_SYMMETRIC; 591 else if (ecmd->rx_pause && !ecmd->tx_pause) 592 skge->flow_control = FLOW_MODE_SYM_OR_REM; 593 else if (!ecmd->rx_pause && ecmd->tx_pause) 594 skge->flow_control = FLOW_MODE_LOC_SEND; 595 else 596 skge->flow_control = FLOW_MODE_NONE; 597 } 598 599 if (netif_running(dev)) { 600 skge_down(dev); 601 err = skge_up(dev); 602 if (err) { 603 dev_close(dev); 604 return err; 605 } 606 } 607 608 return 0; 609 } 610 611 /* Chip internal frequency for clock calculations */ 612 static inline u32 hwkhz(const struct skge_hw *hw) 613 { 614 return is_genesis(hw) ? 53125 : 78125; 615 } 616 617 /* Chip HZ to microseconds */ 618 static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks) 619 { 620 return (ticks * 1000) / hwkhz(hw); 621 } 622 623 /* Microseconds to chip HZ */ 624 static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec) 625 { 626 return hwkhz(hw) * usec / 1000; 627 } 628 629 static int skge_get_coalesce(struct net_device *dev, 630 struct ethtool_coalesce *ecmd) 631 { 632 struct skge_port *skge = netdev_priv(dev); 633 struct skge_hw *hw = skge->hw; 634 int port = skge->port; 635 636 ecmd->rx_coalesce_usecs = 0; 637 ecmd->tx_coalesce_usecs = 0; 638 639 if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) { 640 u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI)); 641 u32 msk = skge_read32(hw, B2_IRQM_MSK); 642 643 if (msk & rxirqmask[port]) 644 ecmd->rx_coalesce_usecs = delay; 645 if (msk & txirqmask[port]) 646 ecmd->tx_coalesce_usecs = delay; 647 } 648 649 return 0; 650 } 651 652 /* Note: interrupt timer is per board, but can turn on/off per port */ 653 static int skge_set_coalesce(struct net_device *dev, 654 struct ethtool_coalesce *ecmd) 655 { 656 struct skge_port *skge = netdev_priv(dev); 657 struct skge_hw *hw = skge->hw; 658 int port = skge->port; 659 u32 msk = skge_read32(hw, B2_IRQM_MSK); 660 u32 delay = 25; 661 662 if (ecmd->rx_coalesce_usecs == 0) 663 msk &= ~rxirqmask[port]; 664 else if (ecmd->rx_coalesce_usecs < 25 || 665 ecmd->rx_coalesce_usecs > 33333) 666 return -EINVAL; 667 else { 668 msk |= rxirqmask[port]; 669 delay = ecmd->rx_coalesce_usecs; 670 } 671 672 if (ecmd->tx_coalesce_usecs == 0) 673 msk &= ~txirqmask[port]; 674 else if (ecmd->tx_coalesce_usecs < 25 || 675 ecmd->tx_coalesce_usecs > 33333) 676 return -EINVAL; 677 else { 678 msk |= txirqmask[port]; 679 delay = min(delay, ecmd->rx_coalesce_usecs); 680 } 681 682 skge_write32(hw, B2_IRQM_MSK, msk); 683 if (msk == 0) 684 skge_write32(hw, B2_IRQM_CTRL, TIM_STOP); 685 else { 686 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay)); 687 skge_write32(hw, B2_IRQM_CTRL, TIM_START); 688 } 689 return 0; 690 } 691 692 enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST }; 693 static void skge_led(struct skge_port *skge, enum led_mode mode) 694 { 695 struct skge_hw *hw = skge->hw; 696 int port = skge->port; 697 698 spin_lock_bh(&hw->phy_lock); 699 if (is_genesis(hw)) { 700 switch (mode) { 701 case LED_MODE_OFF: 702 if (hw->phy_type == SK_PHY_BCOM) 703 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF); 704 else { 705 skge_write32(hw, SK_REG(port, TX_LED_VAL), 0); 706 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF); 707 } 708 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); 709 skge_write32(hw, SK_REG(port, RX_LED_VAL), 0); 710 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF); 711 break; 712 713 case LED_MODE_ON: 714 skge_write8(hw, 
SK_REG(port, LNK_LED_REG), LINKLED_ON); 715 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON); 716 717 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); 718 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); 719 720 break; 721 722 case LED_MODE_TST: 723 skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON); 724 skge_write32(hw, SK_REG(port, RX_LED_VAL), 100); 725 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); 726 727 if (hw->phy_type == SK_PHY_BCOM) 728 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON); 729 else { 730 skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON); 731 skge_write32(hw, SK_REG(port, TX_LED_VAL), 100); 732 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); 733 } 734 735 } 736 } else { 737 switch (mode) { 738 case LED_MODE_OFF: 739 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); 740 gm_phy_write(hw, port, PHY_MARV_LED_OVER, 741 PHY_M_LED_MO_DUP(MO_LED_OFF) | 742 PHY_M_LED_MO_10(MO_LED_OFF) | 743 PHY_M_LED_MO_100(MO_LED_OFF) | 744 PHY_M_LED_MO_1000(MO_LED_OFF) | 745 PHY_M_LED_MO_RX(MO_LED_OFF)); 746 break; 747 case LED_MODE_ON: 748 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 749 PHY_M_LED_PULS_DUR(PULS_170MS) | 750 PHY_M_LED_BLINK_RT(BLINK_84MS) | 751 PHY_M_LEDC_TX_CTRL | 752 PHY_M_LEDC_DP_CTRL); 753 754 gm_phy_write(hw, port, PHY_MARV_LED_OVER, 755 PHY_M_LED_MO_RX(MO_LED_OFF) | 756 (skge->speed == SPEED_100 ? 757 PHY_M_LED_MO_100(MO_LED_ON) : 0)); 758 break; 759 case LED_MODE_TST: 760 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); 761 gm_phy_write(hw, port, PHY_MARV_LED_OVER, 762 PHY_M_LED_MO_DUP(MO_LED_ON) | 763 PHY_M_LED_MO_10(MO_LED_ON) | 764 PHY_M_LED_MO_100(MO_LED_ON) | 765 PHY_M_LED_MO_1000(MO_LED_ON) | 766 PHY_M_LED_MO_RX(MO_LED_ON)); 767 } 768 } 769 spin_unlock_bh(&hw->phy_lock); 770 } 771 772 /* blink LED's for finding board */ 773 static int skge_set_phys_id(struct net_device *dev, 774 enum ethtool_phys_id_state state) 775 { 776 struct skge_port *skge = netdev_priv(dev); 777 778 switch (state) { 779 case ETHTOOL_ID_ACTIVE: 780 return 2; /* cycle on/off twice per second */ 781 782 case ETHTOOL_ID_ON: 783 skge_led(skge, LED_MODE_TST); 784 break; 785 786 case ETHTOOL_ID_OFF: 787 skge_led(skge, LED_MODE_OFF); 788 break; 789 790 case ETHTOOL_ID_INACTIVE: 791 /* back to regular LED state */ 792 skge_led(skge, netif_running(dev) ? 
LED_MODE_ON : LED_MODE_OFF); 793 } 794 795 return 0; 796 } 797 798 static int skge_get_eeprom_len(struct net_device *dev) 799 { 800 struct skge_port *skge = netdev_priv(dev); 801 u32 reg2; 802 803 pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, ®2); 804 return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); 805 } 806 807 static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset) 808 { 809 u32 val; 810 811 pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); 812 813 do { 814 pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); 815 } while (!(offset & PCI_VPD_ADDR_F)); 816 817 pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); 818 return val; 819 } 820 821 static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) 822 { 823 pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val); 824 pci_write_config_word(pdev, cap + PCI_VPD_ADDR, 825 offset | PCI_VPD_ADDR_F); 826 827 do { 828 pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); 829 } while (offset & PCI_VPD_ADDR_F); 830 } 831 832 static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 833 u8 *data) 834 { 835 struct skge_port *skge = netdev_priv(dev); 836 struct pci_dev *pdev = skge->hw->pdev; 837 int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); 838 int length = eeprom->len; 839 u16 offset = eeprom->offset; 840 841 if (!cap) 842 return -EINVAL; 843 844 eeprom->magic = SKGE_EEPROM_MAGIC; 845 846 while (length > 0) { 847 u32 val = skge_vpd_read(pdev, cap, offset); 848 int n = min_t(int, length, sizeof(val)); 849 850 memcpy(data, &val, n); 851 length -= n; 852 data += n; 853 offset += n; 854 } 855 return 0; 856 } 857 858 static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 859 u8 *data) 860 { 861 struct skge_port *skge = netdev_priv(dev); 862 struct pci_dev *pdev = skge->hw->pdev; 863 int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); 864 int length = eeprom->len; 865 u16 offset = eeprom->offset; 866 867 if (!cap) 868 return -EINVAL; 869 870 if (eeprom->magic != SKGE_EEPROM_MAGIC) 871 return -EINVAL; 872 873 while (length > 0) { 874 u32 val; 875 int n = min_t(int, length, sizeof(val)); 876 877 if (n < sizeof(val)) 878 val = skge_vpd_read(pdev, cap, offset); 879 memcpy(&val, data, n); 880 881 skge_vpd_write(pdev, cap, offset, val); 882 883 length -= n; 884 data += n; 885 offset += n; 886 } 887 return 0; 888 } 889 890 static const struct ethtool_ops skge_ethtool_ops = { 891 .get_drvinfo = skge_get_drvinfo, 892 .get_regs_len = skge_get_regs_len, 893 .get_regs = skge_get_regs, 894 .get_wol = skge_get_wol, 895 .set_wol = skge_set_wol, 896 .get_msglevel = skge_get_msglevel, 897 .set_msglevel = skge_set_msglevel, 898 .nway_reset = skge_nway_reset, 899 .get_link = ethtool_op_get_link, 900 .get_eeprom_len = skge_get_eeprom_len, 901 .get_eeprom = skge_get_eeprom, 902 .set_eeprom = skge_set_eeprom, 903 .get_ringparam = skge_get_ring_param, 904 .set_ringparam = skge_set_ring_param, 905 .get_pauseparam = skge_get_pauseparam, 906 .set_pauseparam = skge_set_pauseparam, 907 .get_coalesce = skge_get_coalesce, 908 .set_coalesce = skge_set_coalesce, 909 .get_strings = skge_get_strings, 910 .set_phys_id = skge_set_phys_id, 911 .get_sset_count = skge_get_sset_count, 912 .get_ethtool_stats = skge_get_ethtool_stats, 913 .get_link_ksettings = skge_get_link_ksettings, 914 .set_link_ksettings = skge_set_link_ksettings, 915 }; 916 917 /* 918 * Allocate ring elements and chain them together 919 * One-to-one association of board descriptors with ring elements 920 */ 921 
static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) 922 { 923 struct skge_tx_desc *d; 924 struct skge_element *e; 925 int i; 926 927 ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL); 928 if (!ring->start) 929 return -ENOMEM; 930 931 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { 932 e->desc = d; 933 if (i == ring->count - 1) { 934 e->next = ring->start; 935 d->next_offset = base; 936 } else { 937 e->next = e + 1; 938 d->next_offset = base + (i+1) * sizeof(*d); 939 } 940 } 941 ring->to_use = ring->to_clean = ring->start; 942 943 return 0; 944 } 945 946 /* Allocate and setup a new buffer for receiving */ 947 static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, 948 struct sk_buff *skb, unsigned int bufsize) 949 { 950 struct skge_rx_desc *rd = e->desc; 951 dma_addr_t map; 952 953 map = pci_map_single(skge->hw->pdev, skb->data, bufsize, 954 PCI_DMA_FROMDEVICE); 955 956 if (pci_dma_mapping_error(skge->hw->pdev, map)) 957 return -1; 958 959 rd->dma_lo = lower_32_bits(map); 960 rd->dma_hi = upper_32_bits(map); 961 e->skb = skb; 962 rd->csum1_start = ETH_HLEN; 963 rd->csum2_start = ETH_HLEN; 964 rd->csum1 = 0; 965 rd->csum2 = 0; 966 967 wmb(); 968 969 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; 970 dma_unmap_addr_set(e, mapaddr, map); 971 dma_unmap_len_set(e, maplen, bufsize); 972 return 0; 973 } 974 975 /* Resume receiving using existing skb, 976 * Note: DMA address is not changed by chip. 977 * MTU not changed while receiver active. 978 */ 979 static inline void skge_rx_reuse(struct skge_element *e, unsigned int size) 980 { 981 struct skge_rx_desc *rd = e->desc; 982 983 rd->csum2 = 0; 984 rd->csum2_start = ETH_HLEN; 985 986 wmb(); 987 988 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size; 989 } 990 991 992 /* Free all buffers in receive ring, assumes receiver stopped */ 993 static void skge_rx_clean(struct skge_port *skge) 994 { 995 struct skge_hw *hw = skge->hw; 996 struct skge_ring *ring = &skge->rx_ring; 997 struct skge_element *e; 998 999 e = ring->start; 1000 do { 1001 struct skge_rx_desc *rd = e->desc; 1002 rd->control = 0; 1003 if (e->skb) { 1004 pci_unmap_single(hw->pdev, 1005 dma_unmap_addr(e, mapaddr), 1006 dma_unmap_len(e, maplen), 1007 PCI_DMA_FROMDEVICE); 1008 dev_kfree_skb(e->skb); 1009 e->skb = NULL; 1010 } 1011 } while ((e = e->next) != ring->start); 1012 } 1013 1014 1015 /* Allocate buffers for receive ring 1016 * For receive: to_clean is next received frame. 
1017 */ 1018 static int skge_rx_fill(struct net_device *dev) 1019 { 1020 struct skge_port *skge = netdev_priv(dev); 1021 struct skge_ring *ring = &skge->rx_ring; 1022 struct skge_element *e; 1023 1024 e = ring->start; 1025 do { 1026 struct sk_buff *skb; 1027 1028 skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN, 1029 GFP_KERNEL); 1030 if (!skb) 1031 return -ENOMEM; 1032 1033 skb_reserve(skb, NET_IP_ALIGN); 1034 if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { 1035 dev_kfree_skb(skb); 1036 return -EIO; 1037 } 1038 } while ((e = e->next) != ring->start); 1039 1040 ring->to_clean = ring->start; 1041 return 0; 1042 } 1043 1044 static const char *skge_pause(enum pause_status status) 1045 { 1046 switch (status) { 1047 case FLOW_STAT_NONE: 1048 return "none"; 1049 case FLOW_STAT_REM_SEND: 1050 return "rx only"; 1051 case FLOW_STAT_LOC_SEND: 1052 return "tx_only"; 1053 case FLOW_STAT_SYMMETRIC: /* Both station may send PAUSE */ 1054 return "both"; 1055 default: 1056 return "indeterminated"; 1057 } 1058 } 1059 1060 1061 static void skge_link_up(struct skge_port *skge) 1062 { 1063 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), 1064 LED_BLK_OFF|LED_SYNC_OFF|LED_REG_ON); 1065 1066 netif_carrier_on(skge->netdev); 1067 netif_wake_queue(skge->netdev); 1068 1069 netif_info(skge, link, skge->netdev, 1070 "Link is up at %d Mbps, %s duplex, flow control %s\n", 1071 skge->speed, 1072 skge->duplex == DUPLEX_FULL ? "full" : "half", 1073 skge_pause(skge->flow_status)); 1074 } 1075 1076 static void skge_link_down(struct skge_port *skge) 1077 { 1078 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF); 1079 netif_carrier_off(skge->netdev); 1080 netif_stop_queue(skge->netdev); 1081 1082 netif_info(skge, link, skge->netdev, "Link is down\n"); 1083 } 1084 1085 static void xm_link_down(struct skge_hw *hw, int port) 1086 { 1087 struct net_device *dev = hw->dev[port]; 1088 struct skge_port *skge = netdev_priv(dev); 1089 1090 xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); 1091 1092 if (netif_carrier_ok(dev)) 1093 skge_link_down(skge); 1094 } 1095 1096 static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) 1097 { 1098 int i; 1099 1100 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); 1101 *val = xm_read16(hw, port, XM_PHY_DATA); 1102 1103 if (hw->phy_type == SK_PHY_XMAC) 1104 goto ready; 1105 1106 for (i = 0; i < PHY_RETRIES; i++) { 1107 if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) 1108 goto ready; 1109 udelay(1); 1110 } 1111 1112 return -ETIMEDOUT; 1113 ready: 1114 *val = xm_read16(hw, port, XM_PHY_DATA); 1115 1116 return 0; 1117 } 1118 1119 static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) 1120 { 1121 u16 v = 0; 1122 if (__xm_phy_read(hw, port, reg, &v)) 1123 pr_warn("%s: phy read timed out\n", hw->dev[port]->name); 1124 return v; 1125 } 1126 1127 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) 1128 { 1129 int i; 1130 1131 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); 1132 for (i = 0; i < PHY_RETRIES; i++) { 1133 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) 1134 goto ready; 1135 udelay(1); 1136 } 1137 return -EIO; 1138 1139 ready: 1140 xm_write16(hw, port, XM_PHY_DATA, val); 1141 for (i = 0; i < PHY_RETRIES; i++) { 1142 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) 1143 return 0; 1144 udelay(1); 1145 } 1146 return -ETIMEDOUT; 1147 } 1148 1149 static void genesis_init(struct skge_hw *hw) 1150 { 1151 /* set blink source counter */ 1152 skge_write32(hw, B2_BSC_INI, 
(SK_BLK_DUR * SK_FACT_53) / 100); 1153 skge_write8(hw, B2_BSC_CTRL, BSC_START); 1154 1155 /* configure mac arbiter */ 1156 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); 1157 1158 /* configure mac arbiter timeout values */ 1159 skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53); 1160 skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53); 1161 skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53); 1162 skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53); 1163 1164 skge_write8(hw, B3_MA_RCINI_RX1, 0); 1165 skge_write8(hw, B3_MA_RCINI_RX2, 0); 1166 skge_write8(hw, B3_MA_RCINI_TX1, 0); 1167 skge_write8(hw, B3_MA_RCINI_TX2, 0); 1168 1169 /* configure packet arbiter timeout */ 1170 skge_write16(hw, B3_PA_CTRL, PA_RST_CLR); 1171 skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX); 1172 skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX); 1173 skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX); 1174 skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX); 1175 } 1176 1177 static void genesis_reset(struct skge_hw *hw, int port) 1178 { 1179 static const u8 zero[8] = { 0 }; 1180 u32 reg; 1181 1182 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); 1183 1184 /* reset the statistics module */ 1185 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); 1186 xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); 1187 xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ 1188 xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ 1189 xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ 1190 1191 /* disable Broadcom PHY IRQ */ 1192 if (hw->phy_type == SK_PHY_BCOM) 1193 xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff); 1194 1195 xm_outhash(hw, port, XM_HSM, zero); 1196 1197 /* Flush TX and RX fifo */ 1198 reg = xm_read32(hw, port, XM_MODE); 1199 xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF); 1200 xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF); 1201 } 1202 1203 /* Convert mode to MII values */ 1204 static const u16 phy_pause_map[] = { 1205 [FLOW_MODE_NONE] = 0, 1206 [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM, 1207 [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP, 1208 [FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM, 1209 }; 1210 1211 /* special defines for FIBER (88E1011S only) */ 1212 static const u16 fiber_pause_map[] = { 1213 [FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE, 1214 [FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD, 1215 [FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD, 1216 [FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD, 1217 }; 1218 1219 1220 /* Check status of Broadcom phy link */ 1221 static void bcom_check_link(struct skge_hw *hw, int port) 1222 { 1223 struct net_device *dev = hw->dev[port]; 1224 struct skge_port *skge = netdev_priv(dev); 1225 u16 status; 1226 1227 /* read twice because of latch */ 1228 xm_phy_read(hw, port, PHY_BCOM_STAT); 1229 status = xm_phy_read(hw, port, PHY_BCOM_STAT); 1230 1231 if ((status & PHY_ST_LSYNC) == 0) { 1232 xm_link_down(hw, port); 1233 return; 1234 } 1235 1236 if (skge->autoneg == AUTONEG_ENABLE) { 1237 u16 lpa, aux; 1238 1239 if (!(status & PHY_ST_AN_OVER)) 1240 return; 1241 1242 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); 1243 if (lpa & PHY_B_AN_RF) { 1244 netdev_notice(dev, "remote fault\n"); 1245 return; 1246 } 1247 1248 aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT); 1249 1250 /* Check Duplex mismatch */ 1251 switch (aux & PHY_B_AS_AN_RES_MSK) { 1252 case PHY_B_RES_1000FD: 1253 skge->duplex = DUPLEX_FULL; 1254 break; 1255 case PHY_B_RES_1000HD: 1256 skge->duplex = DUPLEX_HALF; 1257 break; 1258 default: 1259 netdev_notice(dev, "duplex mismatch\n"); 1260 return; 1261 } 1262 1263 /* We are using IEEE 802.3z/D5.0 Table 37-4 
*/ 1264 switch (aux & PHY_B_AS_PAUSE_MSK) { 1265 case PHY_B_AS_PAUSE_MSK: 1266 skge->flow_status = FLOW_STAT_SYMMETRIC; 1267 break; 1268 case PHY_B_AS_PRR: 1269 skge->flow_status = FLOW_STAT_REM_SEND; 1270 break; 1271 case PHY_B_AS_PRT: 1272 skge->flow_status = FLOW_STAT_LOC_SEND; 1273 break; 1274 default: 1275 skge->flow_status = FLOW_STAT_NONE; 1276 } 1277 skge->speed = SPEED_1000; 1278 } 1279 1280 if (!netif_carrier_ok(dev)) 1281 genesis_link_up(skge); 1282 } 1283 1284 /* Broadcom 5400 only supports giagabit! SysKonnect did not put an additional 1285 * Phy on for 100 or 10Mbit operation 1286 */ 1287 static void bcom_phy_init(struct skge_port *skge) 1288 { 1289 struct skge_hw *hw = skge->hw; 1290 int port = skge->port; 1291 int i; 1292 u16 id1, r, ext, ctl; 1293 1294 /* magic workaround patterns for Broadcom */ 1295 static const struct { 1296 u16 reg; 1297 u16 val; 1298 } A1hack[] = { 1299 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, 1300 { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, 1301 { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 }, 1302 { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, 1303 }, C0hack[] = { 1304 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, 1305 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, 1306 }; 1307 1308 /* read Id from external PHY (all have the same address) */ 1309 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1); 1310 1311 /* Optimize MDIO transfer by suppressing preamble. */ 1312 r = xm_read16(hw, port, XM_MMU_CMD); 1313 r |= XM_MMU_NO_PRE; 1314 xm_write16(hw, port, XM_MMU_CMD, r); 1315 1316 switch (id1) { 1317 case PHY_BCOM_ID1_C0: 1318 /* 1319 * Workaround BCOM Errata for the C0 type. 1320 * Write magic patterns to reserved registers. 1321 */ 1322 for (i = 0; i < ARRAY_SIZE(C0hack); i++) 1323 xm_phy_write(hw, port, 1324 C0hack[i].reg, C0hack[i].val); 1325 1326 break; 1327 case PHY_BCOM_ID1_A1: 1328 /* 1329 * Workaround BCOM Errata for the A1 type. 1330 * Write magic patterns to reserved registers. 1331 */ 1332 for (i = 0; i < ARRAY_SIZE(A1hack); i++) 1333 xm_phy_write(hw, port, 1334 A1hack[i].reg, A1hack[i].val); 1335 break; 1336 } 1337 1338 /* 1339 * Workaround BCOM Errata (#10523) for all BCom PHYs. 1340 * Disable Power Management after reset. 1341 */ 1342 r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL); 1343 r |= PHY_B_AC_DIS_PM; 1344 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r); 1345 1346 /* Dummy read */ 1347 xm_read16(hw, port, XM_ISRC); 1348 1349 ext = PHY_B_PEC_EN_LTR; /* enable tx led */ 1350 ctl = PHY_CT_SP1000; /* always 1000mbit */ 1351 1352 if (skge->autoneg == AUTONEG_ENABLE) { 1353 /* 1354 * Workaround BCOM Errata #1 for the C5 type. 
1355 * 1000Base-T Link Acquisition Failure in Slave Mode 1356 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register 1357 */ 1358 u16 adv = PHY_B_1000C_RD; 1359 if (skge->advertising & ADVERTISED_1000baseT_Half) 1360 adv |= PHY_B_1000C_AHD; 1361 if (skge->advertising & ADVERTISED_1000baseT_Full) 1362 adv |= PHY_B_1000C_AFD; 1363 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv); 1364 1365 ctl |= PHY_CT_ANE | PHY_CT_RE_CFG; 1366 } else { 1367 if (skge->duplex == DUPLEX_FULL) 1368 ctl |= PHY_CT_DUP_MD; 1369 /* Force to slave */ 1370 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE); 1371 } 1372 1373 /* Set autonegotiation pause parameters */ 1374 xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, 1375 phy_pause_map[skge->flow_control] | PHY_AN_CSMA); 1376 1377 /* Handle Jumbo frames */ 1378 if (hw->dev[port]->mtu > ETH_DATA_LEN) { 1379 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, 1380 PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK); 1381 1382 ext |= PHY_B_PEC_HIGH_LA; 1383 1384 } 1385 1386 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext); 1387 xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl); 1388 1389 /* Use link status change interrupt */ 1390 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); 1391 } 1392 1393 static void xm_phy_init(struct skge_port *skge) 1394 { 1395 struct skge_hw *hw = skge->hw; 1396 int port = skge->port; 1397 u16 ctrl = 0; 1398 1399 if (skge->autoneg == AUTONEG_ENABLE) { 1400 if (skge->advertising & ADVERTISED_1000baseT_Half) 1401 ctrl |= PHY_X_AN_HD; 1402 if (skge->advertising & ADVERTISED_1000baseT_Full) 1403 ctrl |= PHY_X_AN_FD; 1404 1405 ctrl |= fiber_pause_map[skge->flow_control]; 1406 1407 xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl); 1408 1409 /* Restart Auto-negotiation */ 1410 ctrl = PHY_CT_ANE | PHY_CT_RE_CFG; 1411 } else { 1412 /* Set DuplexMode in Config register */ 1413 if (skge->duplex == DUPLEX_FULL) 1414 ctrl |= PHY_CT_DUP_MD; 1415 /* 1416 * Do NOT enable Auto-negotiation here. 
This would hold 1417 * the link down because no IDLEs are transmitted 1418 */ 1419 } 1420 1421 xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl); 1422 1423 /* Poll PHY for status changes */ 1424 mod_timer(&skge->link_timer, jiffies + LINK_HZ); 1425 } 1426 1427 static int xm_check_link(struct net_device *dev) 1428 { 1429 struct skge_port *skge = netdev_priv(dev); 1430 struct skge_hw *hw = skge->hw; 1431 int port = skge->port; 1432 u16 status; 1433 1434 /* read twice because of latch */ 1435 xm_phy_read(hw, port, PHY_XMAC_STAT); 1436 status = xm_phy_read(hw, port, PHY_XMAC_STAT); 1437 1438 if ((status & PHY_ST_LSYNC) == 0) { 1439 xm_link_down(hw, port); 1440 return 0; 1441 } 1442 1443 if (skge->autoneg == AUTONEG_ENABLE) { 1444 u16 lpa, res; 1445 1446 if (!(status & PHY_ST_AN_OVER)) 1447 return 0; 1448 1449 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); 1450 if (lpa & PHY_B_AN_RF) { 1451 netdev_notice(dev, "remote fault\n"); 1452 return 0; 1453 } 1454 1455 res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI); 1456 1457 /* Check Duplex mismatch */ 1458 switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) { 1459 case PHY_X_RS_FD: 1460 skge->duplex = DUPLEX_FULL; 1461 break; 1462 case PHY_X_RS_HD: 1463 skge->duplex = DUPLEX_HALF; 1464 break; 1465 default: 1466 netdev_notice(dev, "duplex mismatch\n"); 1467 return 0; 1468 } 1469 1470 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 1471 if ((skge->flow_control == FLOW_MODE_SYMMETRIC || 1472 skge->flow_control == FLOW_MODE_SYM_OR_REM) && 1473 (lpa & PHY_X_P_SYM_MD)) 1474 skge->flow_status = FLOW_STAT_SYMMETRIC; 1475 else if (skge->flow_control == FLOW_MODE_SYM_OR_REM && 1476 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) 1477 /* Enable PAUSE receive, disable PAUSE transmit */ 1478 skge->flow_status = FLOW_STAT_REM_SEND; 1479 else if (skge->flow_control == FLOW_MODE_LOC_SEND && 1480 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) 1481 /* Disable PAUSE receive, enable PAUSE transmit */ 1482 skge->flow_status = FLOW_STAT_LOC_SEND; 1483 else 1484 skge->flow_status = FLOW_STAT_NONE; 1485 1486 skge->speed = SPEED_1000; 1487 } 1488 1489 if (!netif_carrier_ok(dev)) 1490 genesis_link_up(skge); 1491 return 1; 1492 } 1493 1494 /* Poll to check for link coming up. 1495 * 1496 * Since internal PHY is wired to a level triggered pin, can't 1497 * get an interrupt when carrier is detected, need to poll for 1498 * link coming up. 1499 */ 1500 static void xm_link_timer(struct timer_list *t) 1501 { 1502 struct skge_port *skge = from_timer(skge, t, link_timer); 1503 struct net_device *dev = skge->netdev; 1504 struct skge_hw *hw = skge->hw; 1505 int port = skge->port; 1506 int i; 1507 unsigned long flags; 1508 1509 if (!netif_running(dev)) 1510 return; 1511 1512 spin_lock_irqsave(&hw->phy_lock, flags); 1513 1514 /* 1515 * Verify that the link by checking GPIO register three times. 1516 * This pin has the signal from the link_sync pin connected to it. 
1517 */ 1518 for (i = 0; i < 3; i++) { 1519 if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS) 1520 goto link_down; 1521 } 1522 1523 /* Re-enable interrupt to detect link down */ 1524 if (xm_check_link(dev)) { 1525 u16 msk = xm_read16(hw, port, XM_IMSK); 1526 msk &= ~XM_IS_INP_ASS; 1527 xm_write16(hw, port, XM_IMSK, msk); 1528 xm_read16(hw, port, XM_ISRC); 1529 } else { 1530 link_down: 1531 mod_timer(&skge->link_timer, 1532 round_jiffies(jiffies + LINK_HZ)); 1533 } 1534 spin_unlock_irqrestore(&hw->phy_lock, flags); 1535 } 1536 1537 static void genesis_mac_init(struct skge_hw *hw, int port) 1538 { 1539 struct net_device *dev = hw->dev[port]; 1540 struct skge_port *skge = netdev_priv(dev); 1541 int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN; 1542 int i; 1543 u32 r; 1544 static const u8 zero[6] = { 0 }; 1545 1546 for (i = 0; i < 10; i++) { 1547 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), 1548 MFF_SET_MAC_RST); 1549 if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST) 1550 goto reset_ok; 1551 udelay(1); 1552 } 1553 1554 netdev_warn(dev, "genesis reset failed\n"); 1555 1556 reset_ok: 1557 /* Unreset the XMAC. */ 1558 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); 1559 1560 /* 1561 * Perform additional initialization for external PHYs, 1562 * namely for the 1000baseTX cards that use the XMAC's 1563 * GMII mode. 1564 */ 1565 if (hw->phy_type != SK_PHY_XMAC) { 1566 /* Take external Phy out of reset */ 1567 r = skge_read32(hw, B2_GP_IO); 1568 if (port == 0) 1569 r |= GP_DIR_0|GP_IO_0; 1570 else 1571 r |= GP_DIR_2|GP_IO_2; 1572 1573 skge_write32(hw, B2_GP_IO, r); 1574 1575 /* Enable GMII interface */ 1576 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); 1577 } 1578 1579 1580 switch (hw->phy_type) { 1581 case SK_PHY_XMAC: 1582 xm_phy_init(skge); 1583 break; 1584 case SK_PHY_BCOM: 1585 bcom_phy_init(skge); 1586 bcom_check_link(hw, port); 1587 } 1588 1589 /* Set Station Address */ 1590 xm_outaddr(hw, port, XM_SA, dev->dev_addr); 1591 1592 /* We don't use match addresses so clear */ 1593 for (i = 1; i < 16; i++) 1594 xm_outaddr(hw, port, XM_EXM(i), zero); 1595 1596 /* Clear MIB counters */ 1597 xm_write16(hw, port, XM_STAT_CMD, 1598 XM_SC_CLR_RXC | XM_SC_CLR_TXC); 1599 /* Clear two times according to Errata #3 */ 1600 xm_write16(hw, port, XM_STAT_CMD, 1601 XM_SC_CLR_RXC | XM_SC_CLR_TXC); 1602 1603 /* configure Rx High Water Mark (XM_RX_HI_WM) */ 1604 xm_write16(hw, port, XM_RX_HI_WM, 1450); 1605 1606 /* We don't need the FCS appended to the packet. */ 1607 r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS; 1608 if (jumbo) 1609 r |= XM_RX_BIG_PK_OK; 1610 1611 if (skge->duplex == DUPLEX_HALF) { 1612 /* 1613 * If in manual half duplex mode the other side might be in 1614 * full duplex mode, so ignore if a carrier extension is not seen 1615 * on frames received 1616 */ 1617 r |= XM_RX_DIS_CEXT; 1618 } 1619 xm_write16(hw, port, XM_RX_CMD, r); 1620 1621 /* We want short frames padded to 60 bytes. */ 1622 xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD); 1623 1624 /* Increase threshold for jumbo frames on dual port */ 1625 if (hw->ports > 1 && jumbo) 1626 xm_write16(hw, port, XM_TX_THR, 1020); 1627 else 1628 xm_write16(hw, port, XM_TX_THR, 512); 1629 1630 /* 1631 * Enable the reception of all error frames. This is is 1632 * a necessary evil due to the design of the XMAC. The 1633 * XMAC's receive FIFO is only 8K in size, however jumbo 1634 * frames can be up to 9000 bytes in length. When bad 1635 * frame filtering is enabled, the XMAC's RX FIFO operates 1636 * in 'store and forward' mode. 
For this to work, the 1637 * entire frame has to fit into the FIFO, but that means 1638 * that jumbo frames larger than 8192 bytes will be 1639 * truncated. Disabling all bad frame filtering causes 1640 * the RX FIFO to operate in streaming mode, in which 1641 * case the XMAC will start transferring frames out of the 1642 * RX FIFO as soon as the FIFO threshold is reached. 1643 */ 1644 xm_write32(hw, port, XM_MODE, XM_DEF_MODE); 1645 1646 1647 /* 1648 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK) 1649 * - Enable all bits excepting 'Octets Rx OK Low CntOv' 1650 * and 'Octets Rx OK Hi Cnt Ov'. 1651 */ 1652 xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK); 1653 1654 /* 1655 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK) 1656 * - Enable all bits excepting 'Octets Tx OK Low CntOv' 1657 * and 'Octets Tx OK Hi Cnt Ov'. 1658 */ 1659 xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK); 1660 1661 /* Configure MAC arbiter */ 1662 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); 1663 1664 /* configure timeout values */ 1665 skge_write8(hw, B3_MA_TOINI_RX1, 72); 1666 skge_write8(hw, B3_MA_TOINI_RX2, 72); 1667 skge_write8(hw, B3_MA_TOINI_TX1, 72); 1668 skge_write8(hw, B3_MA_TOINI_TX2, 72); 1669 1670 skge_write8(hw, B3_MA_RCINI_RX1, 0); 1671 skge_write8(hw, B3_MA_RCINI_RX2, 0); 1672 skge_write8(hw, B3_MA_RCINI_TX1, 0); 1673 skge_write8(hw, B3_MA_RCINI_TX2, 0); 1674 1675 /* Configure Rx MAC FIFO */ 1676 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR); 1677 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT); 1678 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD); 1679 1680 /* Configure Tx MAC FIFO */ 1681 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR); 1682 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); 1683 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD); 1684 1685 if (jumbo) { 1686 /* Enable frame flushing if jumbo frames used */ 1687 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH); 1688 } else { 1689 /* enable timeout timers if normal frames */ 1690 skge_write16(hw, B3_PA_CTRL, 1691 (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2); 1692 } 1693 } 1694 1695 static void genesis_stop(struct skge_port *skge) 1696 { 1697 struct skge_hw *hw = skge->hw; 1698 int port = skge->port; 1699 unsigned retries = 1000; 1700 u16 cmd; 1701 1702 /* Disable Tx and Rx */ 1703 cmd = xm_read16(hw, port, XM_MMU_CMD); 1704 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1705 xm_write16(hw, port, XM_MMU_CMD, cmd); 1706 1707 genesis_reset(hw, port); 1708 1709 /* Clear Tx packet arbiter timeout IRQ */ 1710 skge_write16(hw, B3_PA_CTRL, 1711 port == 0 ? 
PA_CLR_TO_TX1 : PA_CLR_TO_TX2); 1712 1713 /* Reset the MAC */ 1714 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); 1715 do { 1716 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); 1717 if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)) 1718 break; 1719 } while (--retries > 0); 1720 1721 /* For external PHYs there must be special handling */ 1722 if (hw->phy_type != SK_PHY_XMAC) { 1723 u32 reg = skge_read32(hw, B2_GP_IO); 1724 if (port == 0) { 1725 reg |= GP_DIR_0; 1726 reg &= ~GP_IO_0; 1727 } else { 1728 reg |= GP_DIR_2; 1729 reg &= ~GP_IO_2; 1730 } 1731 skge_write32(hw, B2_GP_IO, reg); 1732 skge_read32(hw, B2_GP_IO); 1733 } 1734 1735 xm_write16(hw, port, XM_MMU_CMD, 1736 xm_read16(hw, port, XM_MMU_CMD) 1737 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); 1738 1739 xm_read16(hw, port, XM_MMU_CMD); 1740 } 1741 1742 1743 static void genesis_get_stats(struct skge_port *skge, u64 *data) 1744 { 1745 struct skge_hw *hw = skge->hw; 1746 int port = skge->port; 1747 int i; 1748 unsigned long timeout = jiffies + HZ; 1749 1750 xm_write16(hw, port, 1751 XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); 1752 1753 /* wait for update to complete */ 1754 while (xm_read16(hw, port, XM_STAT_CMD) 1755 & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) { 1756 if (time_after(jiffies, timeout)) 1757 break; 1758 udelay(10); 1759 } 1760 1761 /* special case for 64 bit octet counter */ 1762 data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32 1763 | xm_read32(hw, port, XM_TXO_OK_LO); 1764 data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32 1765 | xm_read32(hw, port, XM_RXO_OK_LO); 1766 1767 for (i = 2; i < ARRAY_SIZE(skge_stats); i++) 1768 data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset); 1769 } 1770 1771 static void genesis_mac_intr(struct skge_hw *hw, int port) 1772 { 1773 struct net_device *dev = hw->dev[port]; 1774 struct skge_port *skge = netdev_priv(dev); 1775 u16 status = xm_read16(hw, port, XM_ISRC); 1776 1777 netif_printk(skge, intr, KERN_DEBUG, skge->netdev, 1778 "mac interrupt status 0x%x\n", status); 1779 1780 if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) { 1781 xm_link_down(hw, port); 1782 mod_timer(&skge->link_timer, jiffies + 1); 1783 } 1784 1785 if (status & XM_IS_TXF_UR) { 1786 xm_write32(hw, port, XM_MODE, XM_MD_FTF); 1787 ++dev->stats.tx_fifo_errors; 1788 } 1789 } 1790 1791 static void genesis_link_up(struct skge_port *skge) 1792 { 1793 struct skge_hw *hw = skge->hw; 1794 int port = skge->port; 1795 u16 cmd, msk; 1796 u32 mode; 1797 1798 cmd = xm_read16(hw, port, XM_MMU_CMD); 1799 1800 /* 1801 * enabling pause frame reception is required for 1000BT 1802 * because the XMAC is not reset if the link is going down 1803 */ 1804 if (skge->flow_status == FLOW_STAT_NONE || 1805 skge->flow_status == FLOW_STAT_LOC_SEND) 1806 /* Disable Pause Frame Reception */ 1807 cmd |= XM_MMU_IGN_PF; 1808 else 1809 /* Enable Pause Frame Reception */ 1810 cmd &= ~XM_MMU_IGN_PF; 1811 1812 xm_write16(hw, port, XM_MMU_CMD, cmd); 1813 1814 mode = xm_read32(hw, port, XM_MODE); 1815 if (skge->flow_status == FLOW_STAT_SYMMETRIC || 1816 skge->flow_status == FLOW_STAT_LOC_SEND) { 1817 /* 1818 * Configure Pause Frame Generation 1819 * Use internal and external Pause Frame Generation. 1820 * Sending pause frames is edge triggered. 1821 * Send a Pause frame with the maximum pause time if 1822 * internal oder external FIFO full condition occurs. 1823 * Send a zero pause time frame to re-start transmission. 
1824 */ 1825 /* XM_PAUSE_DA = '010000C28001' (default) */ 1826 /* XM_MAC_PTIME = 0xffff (maximum) */ 1827 /* remember this value is defined in big endian (!) */ 1828 xm_write16(hw, port, XM_MAC_PTIME, 0xffff); 1829 1830 mode |= XM_PAUSE_MODE; 1831 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); 1832 } else { 1833 /* 1834 * disable pause frame generation is required for 1000BT 1835 * because the XMAC is not reset if the link is going down 1836 */ 1837 /* Disable Pause Mode in Mode Register */ 1838 mode &= ~XM_PAUSE_MODE; 1839 1840 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); 1841 } 1842 1843 xm_write32(hw, port, XM_MODE, mode); 1844 1845 /* Turn on detection of Tx underrun */ 1846 msk = xm_read16(hw, port, XM_IMSK); 1847 msk &= ~XM_IS_TXF_UR; 1848 xm_write16(hw, port, XM_IMSK, msk); 1849 1850 xm_read16(hw, port, XM_ISRC); 1851 1852 /* get MMU Command Reg. */ 1853 cmd = xm_read16(hw, port, XM_MMU_CMD); 1854 if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL) 1855 cmd |= XM_MMU_GMII_FD; 1856 1857 /* 1858 * Workaround BCOM Errata (#10523) for all BCom Phys 1859 * Enable Power Management after link up 1860 */ 1861 if (hw->phy_type == SK_PHY_BCOM) { 1862 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, 1863 xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) 1864 & ~PHY_B_AC_DIS_PM); 1865 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); 1866 } 1867 1868 /* enable Rx/Tx */ 1869 xm_write16(hw, port, XM_MMU_CMD, 1870 cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1871 skge_link_up(skge); 1872 } 1873 1874 1875 static inline void bcom_phy_intr(struct skge_port *skge) 1876 { 1877 struct skge_hw *hw = skge->hw; 1878 int port = skge->port; 1879 u16 isrc; 1880 1881 isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT); 1882 netif_printk(skge, intr, KERN_DEBUG, skge->netdev, 1883 "phy interrupt status 0x%x\n", isrc); 1884 1885 if (isrc & PHY_B_IS_PSE) 1886 pr_err("%s: uncorrectable pair swap error\n", 1887 hw->dev[port]->name); 1888 1889 /* Workaround BCom Errata: 1890 * enable and disable loopback mode if "NO HCD" occurs. 
1891 */ 1892 if (isrc & PHY_B_IS_NO_HDCL) { 1893 u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL); 1894 xm_phy_write(hw, port, PHY_BCOM_CTRL, 1895 ctrl | PHY_CT_LOOP); 1896 xm_phy_write(hw, port, PHY_BCOM_CTRL, 1897 ctrl & ~PHY_CT_LOOP); 1898 } 1899 1900 if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) 1901 bcom_check_link(hw, port); 1902 1903 } 1904 1905 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) 1906 { 1907 int i; 1908 1909 gma_write16(hw, port, GM_SMI_DATA, val); 1910 gma_write16(hw, port, GM_SMI_CTRL, 1911 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); 1912 for (i = 0; i < PHY_RETRIES; i++) { 1913 udelay(1); 1914 1915 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) 1916 return 0; 1917 } 1918 1919 pr_warn("%s: phy write timeout\n", hw->dev[port]->name); 1920 return -EIO; 1921 } 1922 1923 static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) 1924 { 1925 int i; 1926 1927 gma_write16(hw, port, GM_SMI_CTRL, 1928 GM_SMI_CT_PHY_AD(hw->phy_addr) 1929 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 1930 1931 for (i = 0; i < PHY_RETRIES; i++) { 1932 udelay(1); 1933 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) 1934 goto ready; 1935 } 1936 1937 return -ETIMEDOUT; 1938 ready: 1939 *val = gma_read16(hw, port, GM_SMI_DATA); 1940 return 0; 1941 } 1942 1943 static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) 1944 { 1945 u16 v = 0; 1946 if (__gm_phy_read(hw, port, reg, &v)) 1947 pr_warn("%s: phy read timeout\n", hw->dev[port]->name); 1948 return v; 1949 } 1950 1951 /* Marvell Phy Initialization */ 1952 static void yukon_init(struct skge_hw *hw, int port) 1953 { 1954 struct skge_port *skge = netdev_priv(hw->dev[port]); 1955 u16 ctrl, ct1000, adv; 1956 1957 if (skge->autoneg == AUTONEG_ENABLE) { 1958 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 1959 1960 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | 1961 PHY_M_EC_MAC_S_MSK); 1962 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); 1963 1964 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); 1965 1966 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); 1967 } 1968 1969 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); 1970 if (skge->autoneg == AUTONEG_DISABLE) 1971 ctrl &= ~PHY_CT_ANE; 1972 1973 ctrl |= PHY_CT_RESET; 1974 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 1975 1976 ctrl = 0; 1977 ct1000 = 0; 1978 adv = PHY_AN_CSMA; 1979 1980 if (skge->autoneg == AUTONEG_ENABLE) { 1981 if (hw->copper) { 1982 if (skge->advertising & ADVERTISED_1000baseT_Full) 1983 ct1000 |= PHY_M_1000C_AFD; 1984 if (skge->advertising & ADVERTISED_1000baseT_Half) 1985 ct1000 |= PHY_M_1000C_AHD; 1986 if (skge->advertising & ADVERTISED_100baseT_Full) 1987 adv |= PHY_M_AN_100_FD; 1988 if (skge->advertising & ADVERTISED_100baseT_Half) 1989 adv |= PHY_M_AN_100_HD; 1990 if (skge->advertising & ADVERTISED_10baseT_Full) 1991 adv |= PHY_M_AN_10_FD; 1992 if (skge->advertising & ADVERTISED_10baseT_Half) 1993 adv |= PHY_M_AN_10_HD; 1994 1995 /* Set Flow-control capabilities */ 1996 adv |= phy_pause_map[skge->flow_control]; 1997 } else { 1998 if (skge->advertising & ADVERTISED_1000baseT_Full) 1999 adv |= PHY_M_AN_1000X_AFD; 2000 if (skge->advertising & ADVERTISED_1000baseT_Half) 2001 adv |= PHY_M_AN_1000X_AHD; 2002 2003 adv |= fiber_pause_map[skge->flow_control]; 2004 } 2005 2006 /* Restart Auto-negotiation */ 2007 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; 2008 } else { 2009 /* forced speed/duplex settings */ 2010 ct1000 = PHY_M_1000C_MSE; 2011 2012 if (skge->duplex == DUPLEX_FULL) 2013 ctrl |= PHY_CT_DUP_MD; 2014 
2015 switch (skge->speed) { 2016 case SPEED_1000: 2017 ctrl |= PHY_CT_SP1000; 2018 break; 2019 case SPEED_100: 2020 ctrl |= PHY_CT_SP100; 2021 break; 2022 } 2023 2024 ctrl |= PHY_CT_RESET; 2025 } 2026 2027 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); 2028 2029 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); 2030 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 2031 2032 /* Enable phy interrupt on autonegotiation complete (or link up) */ 2033 if (skge->autoneg == AUTONEG_ENABLE) 2034 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK); 2035 else 2036 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); 2037 } 2038 2039 static void yukon_reset(struct skge_hw *hw, int port) 2040 { 2041 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */ 2042 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ 2043 gma_write16(hw, port, GM_MC_ADDR_H2, 0); 2044 gma_write16(hw, port, GM_MC_ADDR_H3, 0); 2045 gma_write16(hw, port, GM_MC_ADDR_H4, 0); 2046 2047 gma_write16(hw, port, GM_RX_CTRL, 2048 gma_read16(hw, port, GM_RX_CTRL) 2049 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 2050 } 2051 2052 /* Apparently, early versions of Yukon-Lite had wrong chip_id? */ 2053 static int is_yukon_lite_a0(struct skge_hw *hw) 2054 { 2055 u32 reg; 2056 int ret; 2057 2058 if (hw->chip_id != CHIP_ID_YUKON) 2059 return 0; 2060 2061 reg = skge_read32(hw, B2_FAR); 2062 skge_write8(hw, B2_FAR + 3, 0xff); 2063 ret = (skge_read8(hw, B2_FAR + 3) != 0); 2064 skge_write32(hw, B2_FAR, reg); 2065 return ret; 2066 } 2067 2068 static void yukon_mac_init(struct skge_hw *hw, int port) 2069 { 2070 struct skge_port *skge = netdev_priv(hw->dev[port]); 2071 int i; 2072 u32 reg; 2073 const u8 *addr = hw->dev[port]->dev_addr; 2074 2075 /* WA code for COMA mode -- set PHY reset */ 2076 if (hw->chip_id == CHIP_ID_YUKON_LITE && 2077 hw->chip_rev >= CHIP_REV_YU_LITE_A3) { 2078 reg = skge_read32(hw, B2_GP_IO); 2079 reg |= GP_DIR_9 | GP_IO_9; 2080 skge_write32(hw, B2_GP_IO, reg); 2081 } 2082 2083 /* hard reset */ 2084 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); 2085 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); 2086 2087 /* WA code for COMA mode -- clear PHY reset */ 2088 if (hw->chip_id == CHIP_ID_YUKON_LITE && 2089 hw->chip_rev >= CHIP_REV_YU_LITE_A3) { 2090 reg = skge_read32(hw, B2_GP_IO); 2091 reg |= GP_DIR_9; 2092 reg &= ~GP_IO_9; 2093 skge_write32(hw, B2_GP_IO, reg); 2094 } 2095 2096 /* Set hardware config mode */ 2097 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | 2098 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE; 2099 reg |= hw->copper ? 
GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; 2100 2101 /* Clear GMC reset */ 2102 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); 2103 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); 2104 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); 2105 2106 if (skge->autoneg == AUTONEG_DISABLE) { 2107 reg = GM_GPCR_AU_ALL_DIS; 2108 gma_write16(hw, port, GM_GP_CTRL, 2109 gma_read16(hw, port, GM_GP_CTRL) | reg); 2110 2111 switch (skge->speed) { 2112 case SPEED_1000: 2113 reg &= ~GM_GPCR_SPEED_100; 2114 reg |= GM_GPCR_SPEED_1000; 2115 break; 2116 case SPEED_100: 2117 reg &= ~GM_GPCR_SPEED_1000; 2118 reg |= GM_GPCR_SPEED_100; 2119 break; 2120 case SPEED_10: 2121 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); 2122 break; 2123 } 2124 2125 if (skge->duplex == DUPLEX_FULL) 2126 reg |= GM_GPCR_DUP_FULL; 2127 } else 2128 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; 2129 2130 switch (skge->flow_control) { 2131 case FLOW_MODE_NONE: 2132 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 2133 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 2134 break; 2135 case FLOW_MODE_LOC_SEND: 2136 /* disable Rx flow-control */ 2137 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 2138 break; 2139 case FLOW_MODE_SYMMETRIC: 2140 case FLOW_MODE_SYM_OR_REM: 2141 /* enable Tx & Rx flow-control */ 2142 break; 2143 } 2144 2145 gma_write16(hw, port, GM_GP_CTRL, reg); 2146 skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); 2147 2148 yukon_init(hw, port); 2149 2150 /* MIB clear */ 2151 reg = gma_read16(hw, port, GM_PHY_ADDR); 2152 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); 2153 2154 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 2155 gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); 2156 gma_write16(hw, port, GM_PHY_ADDR, reg); 2157 2158 /* transmit control */ 2159 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 2160 2161 /* receive control reg: unicast + multicast + no FCS */ 2162 gma_write16(hw, port, GM_RX_CTRL, 2163 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); 2164 2165 /* transmit flow control */ 2166 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); 2167 2168 /* transmit parameter */ 2169 gma_write16(hw, port, GM_TX_PARAM, 2170 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | 2171 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 2172 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); 2173 2174 /* configure the Serial Mode Register */ 2175 reg = DATA_BLIND_VAL(DATA_BLIND_DEF) 2176 | GM_SMOD_VLAN_ENA 2177 | IPG_DATA_VAL(IPG_DATA_DEF); 2178 2179 if (hw->dev[port]->mtu > ETH_DATA_LEN) 2180 reg |= GM_SMOD_JUMBO_ENA; 2181 2182 gma_write16(hw, port, GM_SERIAL_MODE, reg); 2183 2184 /* physical address: used for pause frames */ 2185 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); 2186 /* virtual address for data */ 2187 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); 2188 2189 /* enable interrupt mask for counter overflows */ 2190 gma_write16(hw, port, GM_TX_IRQ_MSK, 0); 2191 gma_write16(hw, port, GM_RX_IRQ_MSK, 0); 2192 gma_write16(hw, port, GM_TR_IRQ_MSK, 0); 2193 2194 /* Initialize Mac Fifo */ 2195 2196 /* Configure Rx MAC FIFO */ 2197 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); 2198 reg = GMF_OPER_ON | GMF_RX_F_FL_ON; 2199 2200 /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. 
A0 only */ 2201 if (is_yukon_lite_a0(hw)) 2202 reg &= ~GMF_RX_F_FL_ON; 2203 2204 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 2205 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); 2206 /* 2207 * because Pause Packet Truncation in GMAC is not working 2208 * we have to increase the Flush Threshold to 64 bytes 2209 * in order to flush pause packets in Rx FIFO on Yukon-1 2210 */ 2211 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1); 2212 2213 /* Configure Tx MAC FIFO */ 2214 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 2215 skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 2216 } 2217 2218 /* Go into power down mode */ 2219 static void yukon_suspend(struct skge_hw *hw, int port) 2220 { 2221 u16 ctrl; 2222 2223 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); 2224 ctrl |= PHY_M_PC_POL_R_DIS; 2225 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); 2226 2227 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); 2228 ctrl |= PHY_CT_RESET; 2229 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 2230 2231 /* switch IEEE compatible power down mode on */ 2232 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); 2233 ctrl |= PHY_CT_PDOWN; 2234 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 2235 } 2236 2237 static void yukon_stop(struct skge_port *skge) 2238 { 2239 struct skge_hw *hw = skge->hw; 2240 int port = skge->port; 2241 2242 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); 2243 yukon_reset(hw, port); 2244 2245 gma_write16(hw, port, GM_GP_CTRL, 2246 gma_read16(hw, port, GM_GP_CTRL) 2247 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); 2248 gma_read16(hw, port, GM_GP_CTRL); 2249 2250 yukon_suspend(hw, port); 2251 2252 /* set GPHY Control reset */ 2253 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); 2254 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); 2255 } 2256 2257 static void yukon_get_stats(struct skge_port *skge, u64 *data) 2258 { 2259 struct skge_hw *hw = skge->hw; 2260 int port = skge->port; 2261 int i; 2262 2263 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 2264 | gma_read32(hw, port, GM_TXO_OK_LO); 2265 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32 2266 | gma_read32(hw, port, GM_RXO_OK_LO); 2267 2268 for (i = 2; i < ARRAY_SIZE(skge_stats); i++) 2269 data[i] = gma_read32(hw, port, 2270 skge_stats[i].gma_offset); 2271 } 2272 2273 static void yukon_mac_intr(struct skge_hw *hw, int port) 2274 { 2275 struct net_device *dev = hw->dev[port]; 2276 struct skge_port *skge = netdev_priv(dev); 2277 u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); 2278 2279 netif_printk(skge, intr, KERN_DEBUG, skge->netdev, 2280 "mac interrupt status 0x%x\n", status); 2281 2282 if (status & GM_IS_RX_FF_OR) { 2283 ++dev->stats.rx_fifo_errors; 2284 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); 2285 } 2286 2287 if (status & GM_IS_TX_FF_UR) { 2288 ++dev->stats.tx_fifo_errors; 2289 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); 2290 } 2291 2292 } 2293 2294 static u16 yukon_speed(const struct skge_hw *hw, u16 aux) 2295 { 2296 switch (aux & PHY_M_PS_SPEED_MSK) { 2297 case PHY_M_PS_SPEED_1000: 2298 return SPEED_1000; 2299 case PHY_M_PS_SPEED_100: 2300 return SPEED_100; 2301 default: 2302 return SPEED_10; 2303 } 2304 } 2305 2306 static void yukon_link_up(struct skge_port *skge) 2307 { 2308 struct skge_hw *hw = skge->hw; 2309 int port = skge->port; 2310 u16 reg; 2311 2312 /* Enable Transmit FIFO Underrun */ 2313 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); 2314 2315 reg = gma_read16(hw, port, GM_GP_CTRL); 2316 if 
(skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) 2317 reg |= GM_GPCR_DUP_FULL; 2318 2319 /* enable Rx/Tx */ 2320 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 2321 gma_write16(hw, port, GM_GP_CTRL, reg); 2322 2323 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); 2324 skge_link_up(skge); 2325 } 2326 2327 static void yukon_link_down(struct skge_port *skge) 2328 { 2329 struct skge_hw *hw = skge->hw; 2330 int port = skge->port; 2331 u16 ctrl; 2332 2333 ctrl = gma_read16(hw, port, GM_GP_CTRL); 2334 ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 2335 gma_write16(hw, port, GM_GP_CTRL, ctrl); 2336 2337 if (skge->flow_status == FLOW_STAT_REM_SEND) { 2338 ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); 2339 ctrl |= PHY_M_AN_ASP; 2340 /* restore Asymmetric Pause bit */ 2341 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl); 2342 } 2343 2344 skge_link_down(skge); 2345 2346 yukon_init(hw, port); 2347 } 2348 2349 static void yukon_phy_intr(struct skge_port *skge) 2350 { 2351 struct skge_hw *hw = skge->hw; 2352 int port = skge->port; 2353 const char *reason = NULL; 2354 u16 istatus, phystat; 2355 2356 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); 2357 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); 2358 2359 netif_printk(skge, intr, KERN_DEBUG, skge->netdev, 2360 "phy interrupt status 0x%x 0x%x\n", istatus, phystat); 2361 2362 if (istatus & PHY_M_IS_AN_COMPL) { 2363 if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP) 2364 & PHY_M_AN_RF) { 2365 reason = "remote fault"; 2366 goto failed; 2367 } 2368 2369 if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) { 2370 reason = "master/slave fault"; 2371 goto failed; 2372 } 2373 2374 if (!(phystat & PHY_M_PS_SPDUP_RES)) { 2375 reason = "speed/duplex"; 2376 goto failed; 2377 } 2378 2379 skge->duplex = (phystat & PHY_M_PS_FULL_DUP) 2380 ? DUPLEX_FULL : DUPLEX_HALF; 2381 skge->speed = yukon_speed(hw, phystat); 2382 2383 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 2384 switch (phystat & PHY_M_PS_PAUSE_MSK) { 2385 case PHY_M_PS_PAUSE_MSK: 2386 skge->flow_status = FLOW_STAT_SYMMETRIC; 2387 break; 2388 case PHY_M_PS_RX_P_EN: 2389 skge->flow_status = FLOW_STAT_REM_SEND; 2390 break; 2391 case PHY_M_PS_TX_P_EN: 2392 skge->flow_status = FLOW_STAT_LOC_SEND; 2393 break; 2394 default: 2395 skge->flow_status = FLOW_STAT_NONE; 2396 } 2397 2398 if (skge->flow_status == FLOW_STAT_NONE || 2399 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) 2400 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 2401 else 2402 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); 2403 yukon_link_up(skge); 2404 return; 2405 } 2406 2407 if (istatus & PHY_M_IS_LSP_CHANGE) 2408 skge->speed = yukon_speed(hw, phystat); 2409 2410 if (istatus & PHY_M_IS_DUP_CHANGE) 2411 skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; 2412 if (istatus & PHY_M_IS_LST_CHANGE) { 2413 if (phystat & PHY_M_PS_LINK_UP) 2414 yukon_link_up(skge); 2415 else 2416 yukon_link_down(skge); 2417 } 2418 return; 2419 failed: 2420 pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason); 2421 2422 /* XXX restart autonegotiation? 
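	 * One option, not attempted here, would be to run yukon_init()
	 * again; with autonegotiation enabled it sets PHY_CT_ANE |
	 * PHY_CT_RE_CFG, which kicks off a fresh negotiation.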
*/ 2423 } 2424 2425 static void skge_phy_reset(struct skge_port *skge) 2426 { 2427 struct skge_hw *hw = skge->hw; 2428 int port = skge->port; 2429 struct net_device *dev = hw->dev[port]; 2430 2431 netif_stop_queue(skge->netdev); 2432 netif_carrier_off(skge->netdev); 2433 2434 spin_lock_bh(&hw->phy_lock); 2435 if (is_genesis(hw)) { 2436 genesis_reset(hw, port); 2437 genesis_mac_init(hw, port); 2438 } else { 2439 yukon_reset(hw, port); 2440 yukon_init(hw, port); 2441 } 2442 spin_unlock_bh(&hw->phy_lock); 2443 2444 skge_set_multicast(dev); 2445 } 2446 2447 /* Basic MII support */ 2448 static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2449 { 2450 struct mii_ioctl_data *data = if_mii(ifr); 2451 struct skge_port *skge = netdev_priv(dev); 2452 struct skge_hw *hw = skge->hw; 2453 int err = -EOPNOTSUPP; 2454 2455 if (!netif_running(dev)) 2456 return -ENODEV; /* Phy still in reset */ 2457 2458 switch (cmd) { 2459 case SIOCGMIIPHY: 2460 data->phy_id = hw->phy_addr; 2461 2462 /* fallthru */ 2463 case SIOCGMIIREG: { 2464 u16 val = 0; 2465 spin_lock_bh(&hw->phy_lock); 2466 2467 if (is_genesis(hw)) 2468 err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); 2469 else 2470 err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); 2471 spin_unlock_bh(&hw->phy_lock); 2472 data->val_out = val; 2473 break; 2474 } 2475 2476 case SIOCSMIIREG: 2477 spin_lock_bh(&hw->phy_lock); 2478 if (is_genesis(hw)) 2479 err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f, 2480 data->val_in); 2481 else 2482 err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f, 2483 data->val_in); 2484 spin_unlock_bh(&hw->phy_lock); 2485 break; 2486 } 2487 return err; 2488 } 2489 2490 static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) 2491 { 2492 u32 end; 2493 2494 start /= 8; 2495 len /= 8; 2496 end = start + len - 1; 2497 2498 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); 2499 skge_write32(hw, RB_ADDR(q, RB_START), start); 2500 skge_write32(hw, RB_ADDR(q, RB_WP), start); 2501 skge_write32(hw, RB_ADDR(q, RB_RP), start); 2502 skge_write32(hw, RB_ADDR(q, RB_END), end); 2503 2504 if (q == Q_R1 || q == Q_R2) { 2505 /* Set thresholds on receive queue's */ 2506 skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), 2507 start + (2*len)/3); 2508 skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), 2509 start + (len/3)); 2510 } else { 2511 /* Enable store & forward on Tx queue's because 2512 * Tx FIFO is only 4K on Genesis and 1K on Yukon 2513 */ 2514 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); 2515 } 2516 2517 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); 2518 } 2519 2520 /* Setup Bus Memory Interface */ 2521 static void skge_qset(struct skge_port *skge, u16 q, 2522 const struct skge_element *e) 2523 { 2524 struct skge_hw *hw = skge->hw; 2525 u32 watermark = 0x600; 2526 u64 base = skge->dma + (e->desc - skge->mem); 2527 2528 /* optimization to reduce window on 32bit/33mhz */ 2529 if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0) 2530 watermark /= 2; 2531 2532 skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET); 2533 skge_write32(hw, Q_ADDR(q, Q_F), watermark); 2534 skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32)); 2535 skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base); 2536 } 2537 2538 static int skge_up(struct net_device *dev) 2539 { 2540 struct skge_port *skge = netdev_priv(dev); 2541 struct skge_hw *hw = skge->hw; 2542 int port = skge->port; 2543 u32 chunk, ram_addr; 2544 size_t rx_size, tx_size; 2545 int err; 2546 2547 if (!is_valid_ether_addr(dev->dev_addr)) 
2548 return -EINVAL; 2549 2550 netif_info(skge, ifup, skge->netdev, "enabling interface\n"); 2551 2552 if (dev->mtu > RX_BUF_SIZE) 2553 skge->rx_buf_size = dev->mtu + ETH_HLEN; 2554 else 2555 skge->rx_buf_size = RX_BUF_SIZE; 2556 2557 2558 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); 2559 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); 2560 skge->mem_size = tx_size + rx_size; 2561 skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma); 2562 if (!skge->mem) 2563 return -ENOMEM; 2564 2565 BUG_ON(skge->dma & 7); 2566 2567 if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { 2568 dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); 2569 err = -EINVAL; 2570 goto free_pci_mem; 2571 } 2572 2573 memset(skge->mem, 0, skge->mem_size); 2574 2575 err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma); 2576 if (err) 2577 goto free_pci_mem; 2578 2579 err = skge_rx_fill(dev); 2580 if (err) 2581 goto free_rx_ring; 2582 2583 err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, 2584 skge->dma + rx_size); 2585 if (err) 2586 goto free_rx_ring; 2587 2588 if (hw->ports == 1) { 2589 err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED, 2590 dev->name, hw); 2591 if (err) { 2592 netdev_err(dev, "Unable to allocate interrupt %d error: %d\n", 2593 hw->pdev->irq, err); 2594 goto free_tx_ring; 2595 } 2596 } 2597 2598 /* Initialize MAC */ 2599 netif_carrier_off(dev); 2600 spin_lock_bh(&hw->phy_lock); 2601 if (is_genesis(hw)) 2602 genesis_mac_init(hw, port); 2603 else 2604 yukon_mac_init(hw, port); 2605 spin_unlock_bh(&hw->phy_lock); 2606 2607 /* Configure RAMbuffers - equally between ports and tx/rx */ 2608 chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2); 2609 ram_addr = hw->ram_offset + 2 * chunk * port; 2610 2611 skge_ramset(hw, rxqaddr[port], ram_addr, chunk); 2612 skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); 2613 2614 BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); 2615 skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk); 2616 skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); 2617 2618 /* Start receiver BMU */ 2619 wmb(); 2620 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); 2621 skge_led(skge, LED_MODE_ON); 2622 2623 spin_lock_irq(&hw->hw_lock); 2624 hw->intr_mask |= portmask[port]; 2625 skge_write32(hw, B0_IMSK, hw->intr_mask); 2626 skge_read32(hw, B0_IMSK); 2627 spin_unlock_irq(&hw->hw_lock); 2628 2629 napi_enable(&skge->napi); 2630 2631 skge_set_multicast(dev); 2632 2633 return 0; 2634 2635 free_tx_ring: 2636 kfree(skge->tx_ring.start); 2637 free_rx_ring: 2638 skge_rx_clean(skge); 2639 kfree(skge->rx_ring.start); 2640 free_pci_mem: 2641 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); 2642 skge->mem = NULL; 2643 2644 return err; 2645 } 2646 2647 /* stop receiver */ 2648 static void skge_rx_stop(struct skge_hw *hw, int port) 2649 { 2650 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); 2651 skge_write32(hw, RB_ADDR(port ? 
Q_R2 : Q_R1, RB_CTRL), 2652 RB_RST_SET|RB_DIS_OP_MD); 2653 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); 2654 } 2655 2656 static int skge_down(struct net_device *dev) 2657 { 2658 struct skge_port *skge = netdev_priv(dev); 2659 struct skge_hw *hw = skge->hw; 2660 int port = skge->port; 2661 2662 if (!skge->mem) 2663 return 0; 2664 2665 netif_info(skge, ifdown, skge->netdev, "disabling interface\n"); 2666 2667 netif_tx_disable(dev); 2668 2669 if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC) 2670 del_timer_sync(&skge->link_timer); 2671 2672 napi_disable(&skge->napi); 2673 netif_carrier_off(dev); 2674 2675 spin_lock_irq(&hw->hw_lock); 2676 hw->intr_mask &= ~portmask[port]; 2677 skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 0 : hw->intr_mask); 2678 skge_read32(hw, B0_IMSK); 2679 spin_unlock_irq(&hw->hw_lock); 2680 2681 if (hw->ports == 1) 2682 free_irq(hw->pdev->irq, hw); 2683 2684 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF); 2685 if (is_genesis(hw)) 2686 genesis_stop(skge); 2687 else 2688 yukon_stop(skge); 2689 2690 /* Stop transmitter */ 2691 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2692 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), 2693 RB_RST_SET|RB_DIS_OP_MD); 2694 2695 2696 /* Disable Force Sync bit and Enable Alloc bit */ 2697 skge_write8(hw, SK_REG(port, TXA_CTRL), 2698 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 2699 2700 /* Stop Interval Timer and Limit Counter of Tx Arbiter */ 2701 skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); 2702 skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); 2703 2704 /* Reset PCI FIFO */ 2705 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); 2706 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); 2707 2708 /* Reset the RAM Buffer async Tx queue */ 2709 skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); 2710 2711 skge_rx_stop(hw, port); 2712 2713 if (is_genesis(hw)) { 2714 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); 2715 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET); 2716 } else { 2717 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 2718 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 2719 } 2720 2721 skge_led(skge, LED_MODE_OFF); 2722 2723 netif_tx_lock_bh(dev); 2724 skge_tx_clean(dev); 2725 netif_tx_unlock_bh(dev); 2726 2727 skge_rx_clean(skge); 2728 2729 kfree(skge->rx_ring.start); 2730 kfree(skge->tx_ring.start); 2731 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); 2732 skge->mem = NULL; 2733 return 0; 2734 } 2735 2736 static inline int skge_avail(const struct skge_ring *ring) 2737 { 2738 smp_mb(); 2739 return ((ring->to_clean > ring->to_use) ? 
0 : ring->count) 2740 + (ring->to_clean - ring->to_use) - 1; 2741 } 2742 2743 static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, 2744 struct net_device *dev) 2745 { 2746 struct skge_port *skge = netdev_priv(dev); 2747 struct skge_hw *hw = skge->hw; 2748 struct skge_element *e; 2749 struct skge_tx_desc *td; 2750 int i; 2751 u32 control, len; 2752 dma_addr_t map; 2753 2754 if (skb_padto(skb, ETH_ZLEN)) 2755 return NETDEV_TX_OK; 2756 2757 if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) 2758 return NETDEV_TX_BUSY; 2759 2760 e = skge->tx_ring.to_use; 2761 td = e->desc; 2762 BUG_ON(td->control & BMU_OWN); 2763 e->skb = skb; 2764 len = skb_headlen(skb); 2765 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); 2766 if (pci_dma_mapping_error(hw->pdev, map)) 2767 goto mapping_error; 2768 2769 dma_unmap_addr_set(e, mapaddr, map); 2770 dma_unmap_len_set(e, maplen, len); 2771 2772 td->dma_lo = lower_32_bits(map); 2773 td->dma_hi = upper_32_bits(map); 2774 2775 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2776 const int offset = skb_checksum_start_offset(skb); 2777 2778 /* This seems backwards, but it is what the sk98lin 2779 * does. Looks like hardware is wrong? 2780 */ 2781 if (ipip_hdr(skb)->protocol == IPPROTO_UDP && 2782 hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) 2783 control = BMU_TCP_CHECK; 2784 else 2785 control = BMU_UDP_CHECK; 2786 2787 td->csum_offs = 0; 2788 td->csum_start = offset; 2789 td->csum_write = offset + skb->csum_offset; 2790 } else 2791 control = BMU_CHECK; 2792 2793 if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */ 2794 control |= BMU_EOF | BMU_IRQ_EOF; 2795 else { 2796 struct skge_tx_desc *tf = td; 2797 2798 control |= BMU_STFWD; 2799 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2800 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2801 2802 map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, 2803 skb_frag_size(frag), DMA_TO_DEVICE); 2804 if (dma_mapping_error(&hw->pdev->dev, map)) 2805 goto mapping_unwind; 2806 2807 e = e->next; 2808 e->skb = skb; 2809 tf = e->desc; 2810 BUG_ON(tf->control & BMU_OWN); 2811 2812 tf->dma_lo = lower_32_bits(map); 2813 tf->dma_hi = upper_32_bits(map); 2814 dma_unmap_addr_set(e, mapaddr, map); 2815 dma_unmap_len_set(e, maplen, skb_frag_size(frag)); 2816 2817 tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag); 2818 } 2819 tf->control |= BMU_EOF | BMU_IRQ_EOF; 2820 } 2821 /* Make sure all the descriptors written */ 2822 wmb(); 2823 td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; 2824 wmb(); 2825 2826 netdev_sent_queue(dev, skb->len); 2827 2828 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); 2829 2830 netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev, 2831 "tx queued, slot %td, len %d\n", 2832 e - skge->tx_ring.start, skb->len); 2833 2834 skge->tx_ring.to_use = e->next; 2835 smp_wmb(); 2836 2837 if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) { 2838 netdev_dbg(dev, "transmit queue full\n"); 2839 netif_stop_queue(dev); 2840 } 2841 2842 return NETDEV_TX_OK; 2843 2844 mapping_unwind: 2845 e = skge->tx_ring.to_use; 2846 pci_unmap_single(hw->pdev, 2847 dma_unmap_addr(e, mapaddr), 2848 dma_unmap_len(e, maplen), 2849 PCI_DMA_TODEVICE); 2850 while (i-- > 0) { 2851 e = e->next; 2852 pci_unmap_page(hw->pdev, 2853 dma_unmap_addr(e, mapaddr), 2854 dma_unmap_len(e, maplen), 2855 PCI_DMA_TODEVICE); 2856 } 2857 2858 mapping_error: 2859 if (net_ratelimit()) 2860 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); 2861 dev_kfree_skb_any(skb); 
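	/* The packet is dropped here: returning NETDEV_TX_OK (rather than
	 * NETDEV_TX_BUSY) keeps the stack from requeueing a buffer that
	 * cannot be DMA-mapped.
	 */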
	return NETDEV_TX_OK;
}


/* Free resources associated with this ring element */
static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
				 u32 control)
{
	/* skb header vs. fragment */
	if (control & BMU_STF)
		pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
				 dma_unmap_len(e, maplen),
				 PCI_DMA_TODEVICE);
	else
		pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
			       dma_unmap_len(e, maplen),
			       PCI_DMA_TODEVICE);
}

/* Free all buffers in transmit ring */
static void skge_tx_clean(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_element *e;

	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
		struct skge_tx_desc *td = e->desc;

		skge_tx_unmap(skge->hw->pdev, e, td->control);

		if (td->control & BMU_EOF)
			dev_kfree_skb(e->skb);
		td->control = 0;
	}

	netdev_reset_queue(dev);
	skge->tx_ring.to_clean = e;
}

static void skge_tx_timeout(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
	skge_tx_clean(dev);
	netif_wake_queue(dev);
}

static int skge_change_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		return 0;
	}

	skge_down(dev);

	dev->mtu = new_mtu;

	err = skge_up(dev);
	if (err)
		dev_close(dev);

	return err;
}

static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };

static void genesis_add_filter(u8 filter[8], const u8 *addr)
{
	u32 crc, bit;

	crc = ether_crc_le(ETH_ALEN, addr);
	bit = ~crc & 0x3f;
	filter[bit/8] |= 1 << (bit%8);
}

static void genesis_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct netdev_hw_addr *ha;
	u32 mode;
	u8 filter[8];

	mode = xm_read32(hw, port, XM_MODE);
	mode |= XM_MD_ENA_HASH;
	if (dev->flags & IFF_PROMISC)
		mode |= XM_MD_ENA_PROM;
	else
		mode &= ~XM_MD_ENA_PROM;

	if (dev->flags & IFF_ALLMULTI)
		memset(filter, 0xff, sizeof(filter));
	else {
		memset(filter, 0, sizeof(filter));

		if (skge->flow_status == FLOW_STAT_REM_SEND ||
		    skge->flow_status == FLOW_STAT_SYMMETRIC)
			genesis_add_filter(filter, pause_mc_addr);

		netdev_for_each_mc_addr(ha, dev)
			genesis_add_filter(filter, ha->addr);
	}

	xm_write32(hw, port, XM_MODE, mode);
	xm_outhash(hw, port, XM_HSM, filter);
}

static void yukon_add_filter(u8 filter[8], const u8 *addr)
{
	u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;
	filter[bit/8] |= 1 << (bit%8);
}

static void yukon_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct netdev_hw_addr *ha;
	int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
			skge->flow_status == FLOW_STAT_SYMMETRIC);
	u16 reg;
	u8 filter[8];

	memset(filter, 0, sizeof(filter));

	reg = gma_read16(hw, port, GM_RX_CTRL);
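	/* Unicast filtering stays enabled in every mode below except
	 * promiscuous.
	 */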
2996 reg |= GM_RXCR_UCF_ENA; 2997 2998 if (dev->flags & IFF_PROMISC) /* promiscuous */ 2999 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 3000 else if (dev->flags & IFF_ALLMULTI) /* all multicast */ 3001 memset(filter, 0xff, sizeof(filter)); 3002 else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */ 3003 reg &= ~GM_RXCR_MCF_ENA; 3004 else { 3005 reg |= GM_RXCR_MCF_ENA; 3006 3007 if (rx_pause) 3008 yukon_add_filter(filter, pause_mc_addr); 3009 3010 netdev_for_each_mc_addr(ha, dev) 3011 yukon_add_filter(filter, ha->addr); 3012 } 3013 3014 3015 gma_write16(hw, port, GM_MC_ADDR_H1, 3016 (u16)filter[0] | ((u16)filter[1] << 8)); 3017 gma_write16(hw, port, GM_MC_ADDR_H2, 3018 (u16)filter[2] | ((u16)filter[3] << 8)); 3019 gma_write16(hw, port, GM_MC_ADDR_H3, 3020 (u16)filter[4] | ((u16)filter[5] << 8)); 3021 gma_write16(hw, port, GM_MC_ADDR_H4, 3022 (u16)filter[6] | ((u16)filter[7] << 8)); 3023 3024 gma_write16(hw, port, GM_RX_CTRL, reg); 3025 } 3026 3027 static inline u16 phy_length(const struct skge_hw *hw, u32 status) 3028 { 3029 if (is_genesis(hw)) 3030 return status >> XMR_FS_LEN_SHIFT; 3031 else 3032 return status >> GMR_FS_LEN_SHIFT; 3033 } 3034 3035 static inline int bad_phy_status(const struct skge_hw *hw, u32 status) 3036 { 3037 if (is_genesis(hw)) 3038 return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0; 3039 else 3040 return (status & GMR_FS_ANY_ERR) || 3041 (status & GMR_FS_RX_OK) == 0; 3042 } 3043 3044 static void skge_set_multicast(struct net_device *dev) 3045 { 3046 struct skge_port *skge = netdev_priv(dev); 3047 3048 if (is_genesis(skge->hw)) 3049 genesis_set_multicast(dev); 3050 else 3051 yukon_set_multicast(dev); 3052 3053 } 3054 3055 3056 /* Get receive buffer from descriptor. 3057 * Handles copy of small buffers and reallocation failures 3058 */ 3059 static struct sk_buff *skge_rx_get(struct net_device *dev, 3060 struct skge_element *e, 3061 u32 control, u32 status, u16 csum) 3062 { 3063 struct skge_port *skge = netdev_priv(dev); 3064 struct sk_buff *skb; 3065 u16 len = control & BMU_BBC; 3066 3067 netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev, 3068 "rx slot %td status 0x%x len %d\n", 3069 e - skge->rx_ring.start, status, len); 3070 3071 if (len > skge->rx_buf_size) 3072 goto error; 3073 3074 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) 3075 goto error; 3076 3077 if (bad_phy_status(skge->hw, status)) 3078 goto error; 3079 3080 if (phy_length(skge->hw, status) != len) 3081 goto error; 3082 3083 if (len < RX_COPY_THRESHOLD) { 3084 skb = netdev_alloc_skb_ip_align(dev, len); 3085 if (!skb) 3086 goto resubmit; 3087 3088 pci_dma_sync_single_for_cpu(skge->hw->pdev, 3089 dma_unmap_addr(e, mapaddr), 3090 dma_unmap_len(e, maplen), 3091 PCI_DMA_FROMDEVICE); 3092 skb_copy_from_linear_data(e->skb, skb->data, len); 3093 pci_dma_sync_single_for_device(skge->hw->pdev, 3094 dma_unmap_addr(e, mapaddr), 3095 dma_unmap_len(e, maplen), 3096 PCI_DMA_FROMDEVICE); 3097 skge_rx_reuse(e, skge->rx_buf_size); 3098 } else { 3099 struct skge_element ee; 3100 struct sk_buff *nskb; 3101 3102 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); 3103 if (!nskb) 3104 goto resubmit; 3105 3106 ee = *e; 3107 3108 skb = ee.skb; 3109 prefetch(skb->data); 3110 3111 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { 3112 dev_kfree_skb(nskb); 3113 goto resubmit; 3114 } 3115 3116 pci_unmap_single(skge->hw->pdev, 3117 dma_unmap_addr(&ee, mapaddr), 3118 dma_unmap_len(&ee, maplen), 3119 PCI_DMA_FROMDEVICE); 3120 } 3121 3122 skb_put(skb, len); 3123 3124 if (dev->features & 
NETIF_F_RXCSUM) { 3125 skb->csum = csum; 3126 skb->ip_summed = CHECKSUM_COMPLETE; 3127 } 3128 3129 skb->protocol = eth_type_trans(skb, dev); 3130 3131 return skb; 3132 error: 3133 3134 netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev, 3135 "rx err, slot %td control 0x%x status 0x%x\n", 3136 e - skge->rx_ring.start, control, status); 3137 3138 if (is_genesis(skge->hw)) { 3139 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) 3140 dev->stats.rx_length_errors++; 3141 if (status & XMR_FS_FRA_ERR) 3142 dev->stats.rx_frame_errors++; 3143 if (status & XMR_FS_FCS_ERR) 3144 dev->stats.rx_crc_errors++; 3145 } else { 3146 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) 3147 dev->stats.rx_length_errors++; 3148 if (status & GMR_FS_FRAGMENT) 3149 dev->stats.rx_frame_errors++; 3150 if (status & GMR_FS_CRC_ERR) 3151 dev->stats.rx_crc_errors++; 3152 } 3153 3154 resubmit: 3155 skge_rx_reuse(e, skge->rx_buf_size); 3156 return NULL; 3157 } 3158 3159 /* Free all buffers in Tx ring which are no longer owned by device */ 3160 static void skge_tx_done(struct net_device *dev) 3161 { 3162 struct skge_port *skge = netdev_priv(dev); 3163 struct skge_ring *ring = &skge->tx_ring; 3164 struct skge_element *e; 3165 unsigned int bytes_compl = 0, pkts_compl = 0; 3166 3167 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); 3168 3169 for (e = ring->to_clean; e != ring->to_use; e = e->next) { 3170 u32 control = ((const struct skge_tx_desc *) e->desc)->control; 3171 3172 if (control & BMU_OWN) 3173 break; 3174 3175 skge_tx_unmap(skge->hw->pdev, e, control); 3176 3177 if (control & BMU_EOF) { 3178 netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev, 3179 "tx done slot %td\n", 3180 e - skge->tx_ring.start); 3181 3182 pkts_compl++; 3183 bytes_compl += e->skb->len; 3184 3185 dev_consume_skb_any(e->skb); 3186 } 3187 } 3188 netdev_completed_queue(dev, pkts_compl, bytes_compl); 3189 skge->tx_ring.to_clean = e; 3190 3191 /* Can run lockless until we need to synchronize to restart queue. 
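	 * The smp_mb() below is intended to pair with the smp_wmb() in
	 * skge_xmit_frame() after tx_ring.to_use is advanced.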
*/ 3192 smp_mb(); 3193 3194 if (unlikely(netif_queue_stopped(dev) && 3195 skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { 3196 netif_tx_lock(dev); 3197 if (unlikely(netif_queue_stopped(dev) && 3198 skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { 3199 netif_wake_queue(dev); 3200 3201 } 3202 netif_tx_unlock(dev); 3203 } 3204 } 3205 3206 static int skge_poll(struct napi_struct *napi, int budget) 3207 { 3208 struct skge_port *skge = container_of(napi, struct skge_port, napi); 3209 struct net_device *dev = skge->netdev; 3210 struct skge_hw *hw = skge->hw; 3211 struct skge_ring *ring = &skge->rx_ring; 3212 struct skge_element *e; 3213 int work_done = 0; 3214 3215 skge_tx_done(dev); 3216 3217 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); 3218 3219 for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) { 3220 struct skge_rx_desc *rd = e->desc; 3221 struct sk_buff *skb; 3222 u32 control; 3223 3224 rmb(); 3225 control = rd->control; 3226 if (control & BMU_OWN) 3227 break; 3228 3229 skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); 3230 if (likely(skb)) { 3231 napi_gro_receive(napi, skb); 3232 ++work_done; 3233 } 3234 } 3235 ring->to_clean = e; 3236 3237 /* restart receiver */ 3238 wmb(); 3239 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); 3240 3241 if (work_done < budget && napi_complete_done(napi, work_done)) { 3242 unsigned long flags; 3243 3244 spin_lock_irqsave(&hw->hw_lock, flags); 3245 hw->intr_mask |= napimask[skge->port]; 3246 skge_write32(hw, B0_IMSK, hw->intr_mask); 3247 skge_read32(hw, B0_IMSK); 3248 spin_unlock_irqrestore(&hw->hw_lock, flags); 3249 } 3250 3251 return work_done; 3252 } 3253 3254 /* Parity errors seem to happen when Genesis is connected to a switch 3255 * with no other ports present. Heartbeat error?? 3256 */ 3257 static void skge_mac_parity(struct skge_hw *hw, int port) 3258 { 3259 struct net_device *dev = hw->dev[port]; 3260 3261 ++dev->stats.tx_heartbeat_errors; 3262 3263 if (is_genesis(hw)) 3264 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), 3265 MFF_CLR_PERR); 3266 else 3267 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ 3268 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), 3269 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) 3270 ? 
GMF_CLI_TX_FC : GMF_CLI_TX_PE); 3271 } 3272 3273 static void skge_mac_intr(struct skge_hw *hw, int port) 3274 { 3275 if (is_genesis(hw)) 3276 genesis_mac_intr(hw, port); 3277 else 3278 yukon_mac_intr(hw, port); 3279 } 3280 3281 /* Handle device specific framing and timeout interrupts */ 3282 static void skge_error_irq(struct skge_hw *hw) 3283 { 3284 struct pci_dev *pdev = hw->pdev; 3285 u32 hwstatus = skge_read32(hw, B0_HWE_ISRC); 3286 3287 if (is_genesis(hw)) { 3288 /* clear xmac errors */ 3289 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) 3290 skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT); 3291 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) 3292 skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT); 3293 } else { 3294 /* Timestamp (unused) overflow */ 3295 if (hwstatus & IS_IRQ_TIST_OV) 3296 skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 3297 } 3298 3299 if (hwstatus & IS_RAM_RD_PAR) { 3300 dev_err(&pdev->dev, "Ram read data parity error\n"); 3301 skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR); 3302 } 3303 3304 if (hwstatus & IS_RAM_WR_PAR) { 3305 dev_err(&pdev->dev, "Ram write data parity error\n"); 3306 skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR); 3307 } 3308 3309 if (hwstatus & IS_M1_PAR_ERR) 3310 skge_mac_parity(hw, 0); 3311 3312 if (hwstatus & IS_M2_PAR_ERR) 3313 skge_mac_parity(hw, 1); 3314 3315 if (hwstatus & IS_R1_PAR_ERR) { 3316 dev_err(&pdev->dev, "%s: receive queue parity error\n", 3317 hw->dev[0]->name); 3318 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); 3319 } 3320 3321 if (hwstatus & IS_R2_PAR_ERR) { 3322 dev_err(&pdev->dev, "%s: receive queue parity error\n", 3323 hw->dev[1]->name); 3324 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); 3325 } 3326 3327 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { 3328 u16 pci_status, pci_cmd; 3329 3330 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 3331 pci_read_config_word(pdev, PCI_STATUS, &pci_status); 3332 3333 dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n", 3334 pci_cmd, pci_status); 3335 3336 /* Write the error bits back to clear them. */ 3337 pci_status &= PCI_STATUS_ERROR_BITS; 3338 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3339 pci_write_config_word(pdev, PCI_COMMAND, 3340 pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); 3341 pci_write_config_word(pdev, PCI_STATUS, pci_status); 3342 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3343 3344 /* if error still set then just ignore it */ 3345 hwstatus = skge_read32(hw, B0_HWE_ISRC); 3346 if (hwstatus & IS_IRQ_STAT) { 3347 dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n"); 3348 hw->intr_mask &= ~IS_HW_ERR; 3349 } 3350 } 3351 } 3352 3353 /* 3354 * Interrupt from PHY are handled in tasklet (softirq) 3355 * because accessing phy registers requires spin wait which might 3356 * cause excess interrupt latency. 
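 * skge_intr() clears IS_EXT_REG from the interrupt mask before scheduling
 * the tasklet, and the tasklet re-enables it once the PHY has been serviced.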
3357 */ 3358 static void skge_extirq(unsigned long arg) 3359 { 3360 struct skge_hw *hw = (struct skge_hw *) arg; 3361 int port; 3362 3363 for (port = 0; port < hw->ports; port++) { 3364 struct net_device *dev = hw->dev[port]; 3365 3366 if (netif_running(dev)) { 3367 struct skge_port *skge = netdev_priv(dev); 3368 3369 spin_lock(&hw->phy_lock); 3370 if (!is_genesis(hw)) 3371 yukon_phy_intr(skge); 3372 else if (hw->phy_type == SK_PHY_BCOM) 3373 bcom_phy_intr(skge); 3374 spin_unlock(&hw->phy_lock); 3375 } 3376 } 3377 3378 spin_lock_irq(&hw->hw_lock); 3379 hw->intr_mask |= IS_EXT_REG; 3380 skge_write32(hw, B0_IMSK, hw->intr_mask); 3381 skge_read32(hw, B0_IMSK); 3382 spin_unlock_irq(&hw->hw_lock); 3383 } 3384 3385 static irqreturn_t skge_intr(int irq, void *dev_id) 3386 { 3387 struct skge_hw *hw = dev_id; 3388 u32 status; 3389 int handled = 0; 3390 3391 spin_lock(&hw->hw_lock); 3392 /* Reading this register masks IRQ */ 3393 status = skge_read32(hw, B0_SP_ISRC); 3394 if (status == 0 || status == ~0) 3395 goto out; 3396 3397 handled = 1; 3398 status &= hw->intr_mask; 3399 if (status & IS_EXT_REG) { 3400 hw->intr_mask &= ~IS_EXT_REG; 3401 tasklet_schedule(&hw->phy_task); 3402 } 3403 3404 if (status & (IS_XA1_F|IS_R1_F)) { 3405 struct skge_port *skge = netdev_priv(hw->dev[0]); 3406 hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); 3407 napi_schedule(&skge->napi); 3408 } 3409 3410 if (status & IS_PA_TO_TX1) 3411 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); 3412 3413 if (status & IS_PA_TO_RX1) { 3414 ++hw->dev[0]->stats.rx_over_errors; 3415 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); 3416 } 3417 3418 3419 if (status & IS_MAC1) 3420 skge_mac_intr(hw, 0); 3421 3422 if (hw->dev[1]) { 3423 struct skge_port *skge = netdev_priv(hw->dev[1]); 3424 3425 if (status & (IS_XA2_F|IS_R2_F)) { 3426 hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); 3427 napi_schedule(&skge->napi); 3428 } 3429 3430 if (status & IS_PA_TO_RX2) { 3431 ++hw->dev[1]->stats.rx_over_errors; 3432 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); 3433 } 3434 3435 if (status & IS_PA_TO_TX2) 3436 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2); 3437 3438 if (status & IS_MAC2) 3439 skge_mac_intr(hw, 1); 3440 } 3441 3442 if (status & IS_HW_ERR) 3443 skge_error_irq(hw); 3444 out: 3445 skge_write32(hw, B0_IMSK, hw->intr_mask); 3446 skge_read32(hw, B0_IMSK); 3447 spin_unlock(&hw->hw_lock); 3448 3449 return IRQ_RETVAL(handled); 3450 } 3451 3452 #ifdef CONFIG_NET_POLL_CONTROLLER 3453 static void skge_netpoll(struct net_device *dev) 3454 { 3455 struct skge_port *skge = netdev_priv(dev); 3456 3457 disable_irq(dev->irq); 3458 skge_intr(dev->irq, skge->hw); 3459 enable_irq(dev->irq); 3460 } 3461 #endif 3462 3463 static int skge_set_mac_address(struct net_device *dev, void *p) 3464 { 3465 struct skge_port *skge = netdev_priv(dev); 3466 struct skge_hw *hw = skge->hw; 3467 unsigned port = skge->port; 3468 const struct sockaddr *addr = p; 3469 u16 ctrl; 3470 3471 if (!is_valid_ether_addr(addr->sa_data)) 3472 return -EADDRNOTAVAIL; 3473 3474 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 3475 3476 if (!netif_running(dev)) { 3477 memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); 3478 memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); 3479 } else { 3480 /* disable Rx */ 3481 spin_lock_bh(&hw->phy_lock); 3482 ctrl = gma_read16(hw, port, GM_GP_CTRL); 3483 gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA); 3484 3485 memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); 3486 memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, 
ETH_ALEN); 3487 3488 if (is_genesis(hw)) 3489 xm_outaddr(hw, port, XM_SA, dev->dev_addr); 3490 else { 3491 gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); 3492 gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); 3493 } 3494 3495 gma_write16(hw, port, GM_GP_CTRL, ctrl); 3496 spin_unlock_bh(&hw->phy_lock); 3497 } 3498 3499 return 0; 3500 } 3501 3502 static const struct { 3503 u8 id; 3504 const char *name; 3505 } skge_chips[] = { 3506 { CHIP_ID_GENESIS, "Genesis" }, 3507 { CHIP_ID_YUKON, "Yukon" }, 3508 { CHIP_ID_YUKON_LITE, "Yukon-Lite"}, 3509 { CHIP_ID_YUKON_LP, "Yukon-LP"}, 3510 }; 3511 3512 static const char *skge_board_name(const struct skge_hw *hw) 3513 { 3514 int i; 3515 static char buf[16]; 3516 3517 for (i = 0; i < ARRAY_SIZE(skge_chips); i++) 3518 if (skge_chips[i].id == hw->chip_id) 3519 return skge_chips[i].name; 3520 3521 snprintf(buf, sizeof(buf), "chipid 0x%x", hw->chip_id); 3522 return buf; 3523 } 3524 3525 3526 /* 3527 * Setup the board data structure, but don't bring up 3528 * the port(s) 3529 */ 3530 static int skge_reset(struct skge_hw *hw) 3531 { 3532 u32 reg; 3533 u16 ctst, pci_status; 3534 u8 t8, mac_cfg, pmd_type; 3535 int i; 3536 3537 ctst = skge_read16(hw, B0_CTST); 3538 3539 /* do a SW reset */ 3540 skge_write8(hw, B0_CTST, CS_RST_SET); 3541 skge_write8(hw, B0_CTST, CS_RST_CLR); 3542 3543 /* clear PCI errors, if any */ 3544 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3545 skge_write8(hw, B2_TST_CTRL2, 0); 3546 3547 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); 3548 pci_write_config_word(hw->pdev, PCI_STATUS, 3549 pci_status | PCI_STATUS_ERROR_BITS); 3550 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3551 skge_write8(hw, B0_CTST, CS_MRST_CLR); 3552 3553 /* restore CLK_RUN bits (for Yukon-Lite) */ 3554 skge_write16(hw, B0_CTST, 3555 ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA)); 3556 3557 hw->chip_id = skge_read8(hw, B2_CHIP_ID); 3558 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; 3559 pmd_type = skge_read8(hw, B2_PMD_TYP); 3560 hw->copper = (pmd_type == 'T' || pmd_type == '1'); 3561 3562 switch (hw->chip_id) { 3563 case CHIP_ID_GENESIS: 3564 #ifdef CONFIG_SKGE_GENESIS 3565 switch (hw->phy_type) { 3566 case SK_PHY_XMAC: 3567 hw->phy_addr = PHY_ADDR_XMAC; 3568 break; 3569 case SK_PHY_BCOM: 3570 hw->phy_addr = PHY_ADDR_BCOM; 3571 break; 3572 default: 3573 dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n", 3574 hw->phy_type); 3575 return -EOPNOTSUPP; 3576 } 3577 break; 3578 #else 3579 dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n"); 3580 return -EOPNOTSUPP; 3581 #endif 3582 3583 case CHIP_ID_YUKON: 3584 case CHIP_ID_YUKON_LITE: 3585 case CHIP_ID_YUKON_LP: 3586 if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S') 3587 hw->copper = 1; 3588 3589 hw->phy_addr = PHY_ADDR_MARV; 3590 break; 3591 3592 default: 3593 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", 3594 hw->chip_id); 3595 return -EOPNOTSUPP; 3596 } 3597 3598 mac_cfg = skge_read8(hw, B2_MAC_CFG); 3599 hw->ports = (mac_cfg & CFG_SNG_MAC) ? 
1 : 2;
	hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;

	/* read the adapter's RAM size */
	t8 = skge_read8(hw, B2_E_0);
	if (is_genesis(hw)) {
		if (t8 == 3) {
			/* special case: 4 x 64k x 36, offset = 0x80000 */
			hw->ram_size = 0x100000;
			hw->ram_offset = 0x80000;
		} else
			hw->ram_size = t8 * 512;
	} else if (t8 == 0)
		hw->ram_size = 0x20000;
	else
		hw->ram_size = t8 * 4096;

	hw->intr_mask = IS_HW_ERR;

	/* Use the PHY IRQ for all but fiber-based Genesis boards */
	if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC))
		hw->intr_mask |= IS_EXT_REG;

	if (is_genesis(hw))
		genesis_init(hw);
	else {
		/* switch power to VCC (WA for VAUX problem) */
		skge_write8(hw, B0_POWER_CTRL,
			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);

		/* avoid boards with stuck Hardware error bits */
		if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
		    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
			dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
			hw->intr_mask &= ~IS_HW_ERR;
		}

		/* Clear PHY COMA */
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
		reg &= ~PCI_PHY_COMA;
		pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);


		for (i = 0; i < hw->ports; i++) {
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
		}
	}

	/* turn off hardware timer (unused) */
	skge_write8(hw, B2_TI_CTRL, TIM_STOP);
	skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
	skge_write8(hw, B0_LED, LED_STAT_ON);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);

	skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);

	skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);

	/* Set interrupt moderation for Transmit only
	 * Receive interrupts avoided by NAPI
	 */
	skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
	skge_write32(hw, B2_IRQM_CTRL, TIM_START);

	/* Leave irq disabled until first port is brought up.
*/ 3685 skge_write32(hw, B0_IMSK, 0); 3686 3687 for (i = 0; i < hw->ports; i++) { 3688 if (is_genesis(hw)) 3689 genesis_reset(hw, i); 3690 else 3691 yukon_reset(hw, i); 3692 } 3693 3694 return 0; 3695 } 3696 3697 3698 #ifdef CONFIG_SKGE_DEBUG 3699 3700 static struct dentry *skge_debug; 3701 3702 static int skge_debug_show(struct seq_file *seq, void *v) 3703 { 3704 struct net_device *dev = seq->private; 3705 const struct skge_port *skge = netdev_priv(dev); 3706 const struct skge_hw *hw = skge->hw; 3707 const struct skge_element *e; 3708 3709 if (!netif_running(dev)) 3710 return -ENETDOWN; 3711 3712 seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC), 3713 skge_read32(hw, B0_IMSK)); 3714 3715 seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring)); 3716 for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { 3717 const struct skge_tx_desc *t = e->desc; 3718 seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n", 3719 t->control, t->dma_hi, t->dma_lo, t->status, 3720 t->csum_offs, t->csum_write, t->csum_start); 3721 } 3722 3723 seq_puts(seq, "\nRx Ring:\n"); 3724 for (e = skge->rx_ring.to_clean; ; e = e->next) { 3725 const struct skge_rx_desc *r = e->desc; 3726 3727 if (r->control & BMU_OWN) 3728 break; 3729 3730 seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n", 3731 r->control, r->dma_hi, r->dma_lo, r->status, 3732 r->timestamp, r->csum1, r->csum1_start); 3733 } 3734 3735 return 0; 3736 } 3737 DEFINE_SHOW_ATTRIBUTE(skge_debug); 3738 3739 /* 3740 * Use network device events to create/remove/rename 3741 * debugfs file entries 3742 */ 3743 static int skge_device_event(struct notifier_block *unused, 3744 unsigned long event, void *ptr) 3745 { 3746 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3747 struct skge_port *skge; 3748 struct dentry *d; 3749 3750 if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug) 3751 goto done; 3752 3753 skge = netdev_priv(dev); 3754 switch (event) { 3755 case NETDEV_CHANGENAME: 3756 if (skge->debugfs) { 3757 d = debugfs_rename(skge_debug, skge->debugfs, 3758 skge_debug, dev->name); 3759 if (d) 3760 skge->debugfs = d; 3761 else { 3762 netdev_info(dev, "rename failed\n"); 3763 debugfs_remove(skge->debugfs); 3764 } 3765 } 3766 break; 3767 3768 case NETDEV_GOING_DOWN: 3769 if (skge->debugfs) { 3770 debugfs_remove(skge->debugfs); 3771 skge->debugfs = NULL; 3772 } 3773 break; 3774 3775 case NETDEV_UP: 3776 d = debugfs_create_file(dev->name, 0444, 3777 skge_debug, dev, 3778 &skge_debug_fops); 3779 if (!d || IS_ERR(d)) 3780 netdev_info(dev, "debugfs create failed\n"); 3781 else 3782 skge->debugfs = d; 3783 break; 3784 } 3785 3786 done: 3787 return NOTIFY_DONE; 3788 } 3789 3790 static struct notifier_block skge_notifier = { 3791 .notifier_call = skge_device_event, 3792 }; 3793 3794 3795 static __init void skge_debug_init(void) 3796 { 3797 struct dentry *ent; 3798 3799 ent = debugfs_create_dir("skge", NULL); 3800 if (!ent || IS_ERR(ent)) { 3801 pr_info("debugfs create directory failed\n"); 3802 return; 3803 } 3804 3805 skge_debug = ent; 3806 register_netdevice_notifier(&skge_notifier); 3807 } 3808 3809 static __exit void skge_debug_cleanup(void) 3810 { 3811 if (skge_debug) { 3812 unregister_netdevice_notifier(&skge_notifier); 3813 debugfs_remove(skge_debug); 3814 skge_debug = NULL; 3815 } 3816 } 3817 3818 #else 3819 #define skge_debug_init() 3820 #define skge_debug_cleanup() 3821 #endif 3822 3823 static const struct net_device_ops skge_netdev_ops = { 3824 .ndo_open = skge_up, 3825 .ndo_stop = skge_down, 3826 
.ndo_start_xmit = skge_xmit_frame, 3827 .ndo_do_ioctl = skge_ioctl, 3828 .ndo_get_stats = skge_get_stats, 3829 .ndo_tx_timeout = skge_tx_timeout, 3830 .ndo_change_mtu = skge_change_mtu, 3831 .ndo_validate_addr = eth_validate_addr, 3832 .ndo_set_rx_mode = skge_set_multicast, 3833 .ndo_set_mac_address = skge_set_mac_address, 3834 #ifdef CONFIG_NET_POLL_CONTROLLER 3835 .ndo_poll_controller = skge_netpoll, 3836 #endif 3837 }; 3838 3839 3840 /* Initialize network device */ 3841 static struct net_device *skge_devinit(struct skge_hw *hw, int port, 3842 int highmem) 3843 { 3844 struct skge_port *skge; 3845 struct net_device *dev = alloc_etherdev(sizeof(*skge)); 3846 3847 if (!dev) 3848 return NULL; 3849 3850 SET_NETDEV_DEV(dev, &hw->pdev->dev); 3851 dev->netdev_ops = &skge_netdev_ops; 3852 dev->ethtool_ops = &skge_ethtool_ops; 3853 dev->watchdog_timeo = TX_WATCHDOG; 3854 dev->irq = hw->pdev->irq; 3855 3856 /* MTU range: 60 - 9000 */ 3857 dev->min_mtu = ETH_ZLEN; 3858 dev->max_mtu = ETH_JUMBO_MTU; 3859 3860 if (highmem) 3861 dev->features |= NETIF_F_HIGHDMA; 3862 3863 skge = netdev_priv(dev); 3864 netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT); 3865 skge->netdev = dev; 3866 skge->hw = hw; 3867 skge->msg_enable = netif_msg_init(debug, default_msg); 3868 3869 skge->tx_ring.count = DEFAULT_TX_RING_SIZE; 3870 skge->rx_ring.count = DEFAULT_RX_RING_SIZE; 3871 3872 /* Auto speed and flow control */ 3873 skge->autoneg = AUTONEG_ENABLE; 3874 skge->flow_control = FLOW_MODE_SYM_OR_REM; 3875 skge->duplex = -1; 3876 skge->speed = -1; 3877 skge->advertising = skge_supported_modes(hw); 3878 3879 if (device_can_wakeup(&hw->pdev->dev)) { 3880 skge->wol = wol_supported(hw) & WAKE_MAGIC; 3881 device_set_wakeup_enable(&hw->pdev->dev, skge->wol); 3882 } 3883 3884 hw->dev[port] = dev; 3885 3886 skge->port = port; 3887 3888 /* Only used for Genesis XMAC */ 3889 if (is_genesis(hw)) 3890 timer_setup(&skge->link_timer, xm_link_timer, 0); 3891 else { 3892 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 3893 NETIF_F_RXCSUM; 3894 dev->features |= dev->hw_features; 3895 } 3896 3897 /* read the mac address */ 3898 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3899 3900 return dev; 3901 } 3902 3903 static void skge_show_addr(struct net_device *dev) 3904 { 3905 const struct skge_port *skge = netdev_priv(dev); 3906 3907 netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); 3908 } 3909 3910 static int only_32bit_dma; 3911 3912 static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3913 { 3914 struct net_device *dev, *dev1; 3915 struct skge_hw *hw; 3916 int err, using_dac = 0; 3917 3918 err = pci_enable_device(pdev); 3919 if (err) { 3920 dev_err(&pdev->dev, "cannot enable PCI device\n"); 3921 goto err_out; 3922 } 3923 3924 err = pci_request_regions(pdev, DRV_NAME); 3925 if (err) { 3926 dev_err(&pdev->dev, "cannot obtain PCI resources\n"); 3927 goto err_out_disable_pdev; 3928 } 3929 3930 pci_set_master(pdev); 3931 3932 if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3933 using_dac = 1; 3934 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3935 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3936 using_dac = 0; 3937 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3938 } 3939 3940 if (err) { 3941 dev_err(&pdev->dev, "no usable DMA configuration\n"); 3942 goto err_out_free_regions; 3943 } 3944 3945 #ifdef __BIG_ENDIAN 3946 /* byte swap descriptors in hardware */ 3947 { 3948 u32 reg; 3949 3950 
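		/* Setting PCI_REV_DESC makes the chip byte-swap descriptors
		 * itself, so the host can keep them in native (big-endian)
		 * byte order.
		 */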
		pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
		reg |= PCI_REV_DESC;
		pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
	}
#endif

	err = -ENOMEM;
	/* space for skge@pci:0000:04:00.0 */
	hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
		     + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
	if (!hw)
		goto err_out_free_regions;

	sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));

	hw->pdev = pdev;
	spin_lock_init(&hw->hw_lock);
	spin_lock_init(&hw->phy_lock);
	tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);

	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
	if (!hw->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		goto err_out_free_hw;
	}

	err = skge_reset(hw);
	if (err)
		goto err_out_iounmap;

	pr_info("%s addr 0x%llx irq %d chip %s rev %d\n",
		DRV_VERSION,
		(unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
		skge_board_name(hw), hw->chip_rev);

	dev = skge_devinit(hw, 0, using_dac);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_led_off;
	}

	/* Some motherboards are broken and have zero in ROM. */
	if (!is_valid_ether_addr(dev->dev_addr))
		dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "cannot register net device\n");
		goto err_out_free_netdev;
	}

	skge_show_addr(dev);

	if (hw->ports > 1) {
		dev1 = skge_devinit(hw, 1, using_dac);
		if (!dev1) {
			err = -ENOMEM;
			goto err_out_unregister;
		}

		err = register_netdev(dev1);
		if (err) {
			dev_err(&pdev->dev, "cannot register second net device\n");
			goto err_out_free_dev1;
		}

		err = request_irq(pdev->irq, skge_intr, IRQF_SHARED,
				  hw->irq_name, hw);
		if (err) {
			dev_err(&pdev->dev, "cannot assign irq %d\n",
				pdev->irq);
			goto err_out_unregister_dev1;
		}

		skge_show_addr(dev1);
	}
	pci_set_drvdata(pdev, hw);

	return 0;

err_out_unregister_dev1:
	unregister_netdev(dev1);
err_out_free_dev1:
	free_netdev(dev1);
err_out_unregister:
	unregister_netdev(dev);
err_out_free_netdev:
	free_netdev(dev);
err_out_led_off:
	skge_write16(hw, B0_LED, LED_STAT_OFF);
err_out_iounmap:
	iounmap(hw->regs);
err_out_free_hw:
	kfree(hw);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return err;
}

static void skge_remove(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	struct net_device *dev0, *dev1;

	if (!hw)
		return;

	dev1 = hw->dev[1];
	if (dev1)
		unregister_netdev(dev1);
	dev0 = hw->dev[0];
	unregister_netdev(dev0);

	tasklet_kill(&hw->phy_task);

	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask = 0;

	if (hw->ports > 1) {
		skge_write32(hw, B0_IMSK, 0);
		skge_read32(hw, B0_IMSK);
	}
	spin_unlock_irq(&hw->hw_lock);

	skge_write16(hw, B0_LED, LED_STAT_OFF);
	skge_write8(hw, B0_CTST, CS_RST_SET);

	if (hw->ports > 1)
		free_irq(pdev->irq, hw);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	if (dev1)
		free_netdev(dev1);
	free_netdev(dev0);

	iounmap(hw->regs);
	kfree(hw);
}

#ifdef
CONFIG_PM_SLEEP 4093 static int skge_suspend(struct device *dev) 4094 { 4095 struct pci_dev *pdev = to_pci_dev(dev); 4096 struct skge_hw *hw = pci_get_drvdata(pdev); 4097 int i; 4098 4099 if (!hw) 4100 return 0; 4101 4102 for (i = 0; i < hw->ports; i++) { 4103 struct net_device *dev = hw->dev[i]; 4104 struct skge_port *skge = netdev_priv(dev); 4105 4106 if (netif_running(dev)) 4107 skge_down(dev); 4108 4109 if (skge->wol) 4110 skge_wol_init(skge); 4111 } 4112 4113 skge_write32(hw, B0_IMSK, 0); 4114 4115 return 0; 4116 } 4117 4118 static int skge_resume(struct device *dev) 4119 { 4120 struct pci_dev *pdev = to_pci_dev(dev); 4121 struct skge_hw *hw = pci_get_drvdata(pdev); 4122 int i, err; 4123 4124 if (!hw) 4125 return 0; 4126 4127 err = skge_reset(hw); 4128 if (err) 4129 goto out; 4130 4131 for (i = 0; i < hw->ports; i++) { 4132 struct net_device *dev = hw->dev[i]; 4133 4134 if (netif_running(dev)) { 4135 err = skge_up(dev); 4136 4137 if (err) { 4138 netdev_err(dev, "could not up: %d\n", err); 4139 dev_close(dev); 4140 goto out; 4141 } 4142 } 4143 } 4144 out: 4145 return err; 4146 } 4147 4148 static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume); 4149 #define SKGE_PM_OPS (&skge_pm_ops) 4150 4151 #else 4152 4153 #define SKGE_PM_OPS NULL 4154 #endif /* CONFIG_PM_SLEEP */ 4155 4156 static void skge_shutdown(struct pci_dev *pdev) 4157 { 4158 struct skge_hw *hw = pci_get_drvdata(pdev); 4159 int i; 4160 4161 if (!hw) 4162 return; 4163 4164 for (i = 0; i < hw->ports; i++) { 4165 struct net_device *dev = hw->dev[i]; 4166 struct skge_port *skge = netdev_priv(dev); 4167 4168 if (skge->wol) 4169 skge_wol_init(skge); 4170 } 4171 4172 pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); 4173 pci_set_power_state(pdev, PCI_D3hot); 4174 } 4175 4176 static struct pci_driver skge_driver = { 4177 .name = DRV_NAME, 4178 .id_table = skge_id_table, 4179 .probe = skge_probe, 4180 .remove = skge_remove, 4181 .shutdown = skge_shutdown, 4182 .driver.pm = SKGE_PM_OPS, 4183 }; 4184 4185 static const struct dmi_system_id skge_32bit_dma_boards[] = { 4186 { 4187 .ident = "Gigabyte nForce boards", 4188 .matches = { 4189 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"), 4190 DMI_MATCH(DMI_BOARD_NAME, "nForce"), 4191 }, 4192 }, 4193 { 4194 .ident = "ASUS P5NSLI", 4195 .matches = { 4196 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), 4197 DMI_MATCH(DMI_BOARD_NAME, "P5NSLI") 4198 }, 4199 }, 4200 { 4201 .ident = "FUJITSU SIEMENS A8NE-FM", 4202 .matches = { 4203 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), 4204 DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM") 4205 }, 4206 }, 4207 {} 4208 }; 4209 4210 static int __init skge_init_module(void) 4211 { 4212 if (dmi_check_system(skge_32bit_dma_boards)) 4213 only_32bit_dma = 1; 4214 skge_debug_init(); 4215 return pci_register_driver(&skge_driver); 4216 } 4217 4218 static void __exit skge_cleanup_module(void) 4219 { 4220 pci_unregister_driver(&skge_driver); 4221 skge_debug_cleanup(); 4222 } 4223 4224 module_init(skge_init_module); 4225 module_exit(skge_cleanup_module); 4226