1 /* 2 * SuperH Ethernet device driver 3 * 4 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu 5 * Copyright (C) 2008-2012 Renesas Solutions Corp. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * You should have received a copy of the GNU General Public License along with 16 * this program; if not, write to the Free Software Foundation, Inc., 17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * The full GNU General Public License is included in this distribution in 20 * the file called "COPYING". 21 */ 22 23 #include <linux/init.h> 24 #include <linux/module.h> 25 #include <linux/kernel.h> 26 #include <linux/spinlock.h> 27 #include <linux/interrupt.h> 28 #include <linux/dma-mapping.h> 29 #include <linux/etherdevice.h> 30 #include <linux/delay.h> 31 #include <linux/platform_device.h> 32 #include <linux/mdio-bitbang.h> 33 #include <linux/netdevice.h> 34 #include <linux/phy.h> 35 #include <linux/cache.h> 36 #include <linux/io.h> 37 #include <linux/pm_runtime.h> 38 #include <linux/slab.h> 39 #include <linux/ethtool.h> 40 #include <linux/if_vlan.h> 41 #include <linux/clk.h> 42 #include <linux/sh_eth.h> 43 44 #include "sh_eth.h" 45 46 #define SH_ETH_DEF_MSG_ENABLE \ 47 (NETIF_MSG_LINK | \ 48 NETIF_MSG_TIMER | \ 49 NETIF_MSG_RX_ERR| \ 50 NETIF_MSG_TX_ERR) 51 52 /* There is CPU dependent code */ 53 #if defined(CONFIG_CPU_SUBTYPE_SH7724) 54 #define SH_ETH_RESET_DEFAULT 1 55 static void sh_eth_set_duplex(struct net_device *ndev) 56 { 57 struct sh_eth_private *mdp = netdev_priv(ndev); 58 59 if (mdp->duplex) /* Full */ 60 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 61 else /* Half */ 62 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 63 } 64 65 static void sh_eth_set_rate(struct net_device *ndev) 66 { 67 struct sh_eth_private *mdp = netdev_priv(ndev); 68 69 switch (mdp->speed) { 70 case 10: /* 10BASE */ 71 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); 72 break; 73 case 100:/* 100BASE */ 74 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); 75 break; 76 default: 77 break; 78 } 79 } 80 81 /* SH7724 */ 82 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 83 .set_duplex = sh_eth_set_duplex, 84 .set_rate = sh_eth_set_rate, 85 86 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, 87 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, 88 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f, 89 90 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 91 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 92 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 93 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 94 95 .apr = 1, 96 .mpr = 1, 97 .tpauser = 1, 98 .hw_swap = 1, 99 .rpadir = 1, 100 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ 101 }; 102 #elif defined(CONFIG_CPU_SUBTYPE_SH7757) 103 #define SH_ETH_HAS_BOTH_MODULES 1 104 #define SH_ETH_HAS_TSU 1 105 static void sh_eth_set_duplex(struct net_device *ndev) 106 { 107 struct sh_eth_private *mdp = netdev_priv(ndev); 108 109 if (mdp->duplex) /* Full */ 110 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, 
ECMR); 111 else /* Half */ 112 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 113 } 114 115 static void sh_eth_set_rate(struct net_device *ndev) 116 { 117 struct sh_eth_private *mdp = netdev_priv(ndev); 118 119 switch (mdp->speed) { 120 case 10: /* 10BASE */ 121 sh_eth_write(ndev, 0, RTRATE); 122 break; 123 case 100:/* 100BASE */ 124 sh_eth_write(ndev, 1, RTRATE); 125 break; 126 default: 127 break; 128 } 129 } 130 131 /* SH7757 */ 132 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 133 .set_duplex = sh_eth_set_duplex, 134 .set_rate = sh_eth_set_rate, 135 136 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 137 .rmcr_value = 0x00000001, 138 139 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 140 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 141 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 142 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 143 144 .apr = 1, 145 .mpr = 1, 146 .tpauser = 1, 147 .hw_swap = 1, 148 .no_ade = 1, 149 .rpadir = 1, 150 .rpadir_value = 2 << 16, 151 }; 152 153 #define SH_GIGA_ETH_BASE 0xfee00000 154 #define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) 155 #define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) 156 static void sh_eth_chip_reset_giga(struct net_device *ndev) 157 { 158 int i; 159 unsigned long mahr[2], malr[2]; 160 161 /* save MAHR and MALR */ 162 for (i = 0; i < 2; i++) { 163 malr[i] = ioread32((void *)GIGA_MALR(i)); 164 mahr[i] = ioread32((void *)GIGA_MAHR(i)); 165 } 166 167 /* reset device */ 168 iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800)); 169 mdelay(1); 170 171 /* restore MAHR and MALR */ 172 for (i = 0; i < 2; i++) { 173 iowrite32(malr[i], (void *)GIGA_MALR(i)); 174 iowrite32(mahr[i], (void *)GIGA_MAHR(i)); 175 } 176 } 177 178 static int sh_eth_is_gether(struct sh_eth_private *mdp); 179 static void sh_eth_reset(struct net_device *ndev) 180 { 181 struct sh_eth_private *mdp = netdev_priv(ndev); 182 int cnt = 100; 183 184 if (sh_eth_is_gether(mdp)) { 185 sh_eth_write(ndev, 0x03, EDSR); 186 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, 187 EDMR); 188 while (cnt > 0) { 189 if (!(sh_eth_read(ndev, EDMR) & 0x3)) 190 break; 191 mdelay(1); 192 cnt--; 193 } 194 if (cnt < 0) 195 printk(KERN_ERR "Device reset fail\n"); 196 197 /* Table Init */ 198 sh_eth_write(ndev, 0x0, TDLAR); 199 sh_eth_write(ndev, 0x0, TDFAR); 200 sh_eth_write(ndev, 0x0, TDFXR); 201 sh_eth_write(ndev, 0x0, TDFFR); 202 sh_eth_write(ndev, 0x0, RDLAR); 203 sh_eth_write(ndev, 0x0, RDFAR); 204 sh_eth_write(ndev, 0x0, RDFXR); 205 sh_eth_write(ndev, 0x0, RDFFR); 206 } else { 207 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, 208 EDMR); 209 mdelay(3); 210 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, 211 EDMR); 212 } 213 } 214 215 static void sh_eth_set_duplex_giga(struct net_device *ndev) 216 { 217 struct sh_eth_private *mdp = netdev_priv(ndev); 218 219 if (mdp->duplex) /* Full */ 220 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 221 else /* Half */ 222 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 223 } 224 225 static void sh_eth_set_rate_giga(struct net_device *ndev) 226 { 227 struct sh_eth_private *mdp = netdev_priv(ndev); 228 229 switch (mdp->speed) { 230 case 10: /* 10BASE */ 231 sh_eth_write(ndev, 0x00000000, GECMR); 232 break; 233 case 100:/* 100BASE */ 234 sh_eth_write(ndev, 0x00000010, GECMR); 235 break; 236 case 1000: /* 1000BASE */ 237 sh_eth_write(ndev, 0x00000020, GECMR); 238 break; 
239 default: 240 break; 241 } 242 } 243 244 /* SH7757(GETHERC) */ 245 static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { 246 .chip_reset = sh_eth_chip_reset_giga, 247 .set_duplex = sh_eth_set_duplex_giga, 248 .set_rate = sh_eth_set_rate_giga, 249 250 .ecsr_value = ECSR_ICD | ECSR_MPD, 251 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 252 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 253 254 .tx_check = EESR_TC1 | EESR_FTC, 255 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 256 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 257 EESR_ECI, 258 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 259 EESR_TFE, 260 .fdr_value = 0x0000072f, 261 .rmcr_value = 0x00000001, 262 263 .apr = 1, 264 .mpr = 1, 265 .tpauser = 1, 266 .bculr = 1, 267 .hw_swap = 1, 268 .rpadir = 1, 269 .rpadir_value = 2 << 16, 270 .no_trimd = 1, 271 .no_ade = 1, 272 .tsu = 1, 273 }; 274 275 static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp) 276 { 277 if (sh_eth_is_gether(mdp)) 278 return &sh_eth_my_cpu_data_giga; 279 else 280 return &sh_eth_my_cpu_data; 281 } 282 283 #elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) 284 #define SH_ETH_HAS_TSU 1 285 static void sh_eth_reset_hw_crc(struct net_device *ndev); 286 static void sh_eth_chip_reset(struct net_device *ndev) 287 { 288 struct sh_eth_private *mdp = netdev_priv(ndev); 289 290 /* reset device */ 291 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); 292 mdelay(1); 293 } 294 295 static void sh_eth_reset(struct net_device *ndev) 296 { 297 int cnt = 100; 298 299 sh_eth_write(ndev, EDSR_ENALL, EDSR); 300 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); 301 while (cnt > 0) { 302 if (!(sh_eth_read(ndev, EDMR) & 0x3)) 303 break; 304 mdelay(1); 305 cnt--; 306 } 307 if (cnt == 0) 308 printk(KERN_ERR "Device reset fail\n"); 309 310 /* Table Init */ 311 sh_eth_write(ndev, 0x0, TDLAR); 312 sh_eth_write(ndev, 0x0, TDFAR); 313 sh_eth_write(ndev, 0x0, TDFXR); 314 sh_eth_write(ndev, 0x0, TDFFR); 315 sh_eth_write(ndev, 0x0, RDLAR); 316 sh_eth_write(ndev, 0x0, RDFAR); 317 sh_eth_write(ndev, 0x0, RDFXR); 318 sh_eth_write(ndev, 0x0, RDFFR); 319 320 /* Reset HW CRC register */ 321 sh_eth_reset_hw_crc(ndev); 322 } 323 324 static void sh_eth_set_duplex(struct net_device *ndev) 325 { 326 struct sh_eth_private *mdp = netdev_priv(ndev); 327 328 if (mdp->duplex) /* Full */ 329 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 330 else /* Half */ 331 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 332 } 333 334 static void sh_eth_set_rate(struct net_device *ndev) 335 { 336 struct sh_eth_private *mdp = netdev_priv(ndev); 337 338 switch (mdp->speed) { 339 case 10: /* 10BASE */ 340 sh_eth_write(ndev, GECMR_10, GECMR); 341 break; 342 case 100:/* 100BASE */ 343 sh_eth_write(ndev, GECMR_100, GECMR); 344 break; 345 case 1000: /* 1000BASE */ 346 sh_eth_write(ndev, GECMR_1000, GECMR); 347 break; 348 default: 349 break; 350 } 351 } 352 353 /* sh7763 */ 354 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 355 .chip_reset = sh_eth_chip_reset, 356 .set_duplex = sh_eth_set_duplex, 357 .set_rate = sh_eth_set_rate, 358 359 .ecsr_value = ECSR_ICD | ECSR_MPD, 360 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 361 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 362 363 .tx_check = EESR_TC1 | EESR_FTC, 364 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 365 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 366 
EESR_ECI, 367 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 368 EESR_TFE, 369 370 .apr = 1, 371 .mpr = 1, 372 .tpauser = 1, 373 .bculr = 1, 374 .hw_swap = 1, 375 .no_trimd = 1, 376 .no_ade = 1, 377 .tsu = 1, 378 #if defined(CONFIG_CPU_SUBTYPE_SH7734) 379 .hw_crc = 1, 380 #endif 381 }; 382 383 static void sh_eth_reset_hw_crc(struct net_device *ndev) 384 { 385 if (sh_eth_my_cpu_data.hw_crc) 386 sh_eth_write(ndev, 0x0, CSMR); 387 } 388 389 #elif defined(CONFIG_CPU_SUBTYPE_SH7619) 390 #define SH_ETH_RESET_DEFAULT 1 391 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 392 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 393 394 .apr = 1, 395 .mpr = 1, 396 .tpauser = 1, 397 .hw_swap = 1, 398 }; 399 #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) 400 #define SH_ETH_RESET_DEFAULT 1 401 #define SH_ETH_HAS_TSU 1 402 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 403 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 404 .tsu = 1, 405 }; 406 #endif 407 408 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) 409 { 410 if (!cd->ecsr_value) 411 cd->ecsr_value = DEFAULT_ECSR_INIT; 412 413 if (!cd->ecsipr_value) 414 cd->ecsipr_value = DEFAULT_ECSIPR_INIT; 415 416 if (!cd->fcftr_value) 417 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \ 418 DEFAULT_FIFO_F_D_RFD; 419 420 if (!cd->fdr_value) 421 cd->fdr_value = DEFAULT_FDR_INIT; 422 423 if (!cd->rmcr_value) 424 cd->rmcr_value = DEFAULT_RMCR_VALUE; 425 426 if (!cd->tx_check) 427 cd->tx_check = DEFAULT_TX_CHECK; 428 429 if (!cd->eesr_err_check) 430 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; 431 432 if (!cd->tx_error_check) 433 cd->tx_error_check = DEFAULT_TX_ERROR_CHECK; 434 } 435 436 #if defined(SH_ETH_RESET_DEFAULT) 437 /* Chip Reset */ 438 static void sh_eth_reset(struct net_device *ndev) 439 { 440 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR); 441 mdelay(3); 442 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR); 443 } 444 #endif 445 446 #if defined(CONFIG_CPU_SH4) 447 static void sh_eth_set_receive_align(struct sk_buff *skb) 448 { 449 int reserve; 450 451 reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1)); 452 if (reserve) 453 skb_reserve(skb, reserve); 454 } 455 #else 456 static void sh_eth_set_receive_align(struct sk_buff *skb) 457 { 458 skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN); 459 } 460 #endif 461 462 463 /* CPU <-> EDMAC endian convert */ 464 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) 465 { 466 switch (mdp->edmac_endian) { 467 case EDMAC_LITTLE_ENDIAN: 468 return cpu_to_le32(x); 469 case EDMAC_BIG_ENDIAN: 470 return cpu_to_be32(x); 471 } 472 return x; 473 } 474 475 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) 476 { 477 switch (mdp->edmac_endian) { 478 case EDMAC_LITTLE_ENDIAN: 479 return le32_to_cpu(x); 480 case EDMAC_BIG_ENDIAN: 481 return be32_to_cpu(x); 482 } 483 return x; 484 } 485 486 /* 487 * Program the hardware MAC address from dev->dev_addr. 488 */ 489 static void update_mac_address(struct net_device *ndev) 490 { 491 sh_eth_write(ndev, 492 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | 493 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); 494 sh_eth_write(ndev, 495 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); 496 } 497 498 /* 499 * Get MAC address from SuperH MAC address register 500 * 501 * SuperH's Ethernet device doesn't have 'ROM' to MAC address. 502 * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g). 
503 * When you want use this device, you must set MAC address in bootloader. 504 * 505 */ 506 static void read_mac_address(struct net_device *ndev, unsigned char *mac) 507 { 508 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { 509 memcpy(ndev->dev_addr, mac, 6); 510 } else { 511 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24); 512 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF; 513 ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF; 514 ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF); 515 ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF; 516 ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF); 517 } 518 } 519 520 static int sh_eth_is_gether(struct sh_eth_private *mdp) 521 { 522 if (mdp->reg_offset == sh_eth_offset_gigabit) 523 return 1; 524 else 525 return 0; 526 } 527 528 static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) 529 { 530 if (sh_eth_is_gether(mdp)) 531 return EDTRR_TRNS_GETHER; 532 else 533 return EDTRR_TRNS_ETHER; 534 } 535 536 struct bb_info { 537 void (*set_gate)(void *addr); 538 struct mdiobb_ctrl ctrl; 539 void *addr; 540 u32 mmd_msk;/* MMD */ 541 u32 mdo_msk; 542 u32 mdi_msk; 543 u32 mdc_msk; 544 }; 545 546 /* PHY bit set */ 547 static void bb_set(void *addr, u32 msk) 548 { 549 iowrite32(ioread32(addr) | msk, addr); 550 } 551 552 /* PHY bit clear */ 553 static void bb_clr(void *addr, u32 msk) 554 { 555 iowrite32((ioread32(addr) & ~msk), addr); 556 } 557 558 /* PHY bit read */ 559 static int bb_read(void *addr, u32 msk) 560 { 561 return (ioread32(addr) & msk) != 0; 562 } 563 564 /* Data I/O pin control */ 565 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit) 566 { 567 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 568 569 if (bitbang->set_gate) 570 bitbang->set_gate(bitbang->addr); 571 572 if (bit) 573 bb_set(bitbang->addr, bitbang->mmd_msk); 574 else 575 bb_clr(bitbang->addr, bitbang->mmd_msk); 576 } 577 578 /* Set bit data*/ 579 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit) 580 { 581 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 582 583 if (bitbang->set_gate) 584 bitbang->set_gate(bitbang->addr); 585 586 if (bit) 587 bb_set(bitbang->addr, bitbang->mdo_msk); 588 else 589 bb_clr(bitbang->addr, bitbang->mdo_msk); 590 } 591 592 /* Get bit data*/ 593 static int sh_get_mdio(struct mdiobb_ctrl *ctrl) 594 { 595 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 596 597 if (bitbang->set_gate) 598 bitbang->set_gate(bitbang->addr); 599 600 return bb_read(bitbang->addr, bitbang->mdi_msk); 601 } 602 603 /* MDC pin control */ 604 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit) 605 { 606 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 607 608 if (bitbang->set_gate) 609 bitbang->set_gate(bitbang->addr); 610 611 if (bit) 612 bb_set(bitbang->addr, bitbang->mdc_msk); 613 else 614 bb_clr(bitbang->addr, bitbang->mdc_msk); 615 } 616 617 /* mdio bus control struct */ 618 static struct mdiobb_ops bb_ops = { 619 .owner = THIS_MODULE, 620 .set_mdc = sh_mdc_ctrl, 621 .set_mdio_dir = sh_mmd_ctrl, 622 .set_mdio_data = sh_set_mdio, 623 .get_mdio_data = sh_get_mdio, 624 }; 625 626 /* free skb and descriptor buffer */ 627 static void sh_eth_ring_free(struct net_device *ndev) 628 { 629 struct sh_eth_private *mdp = netdev_priv(ndev); 630 int i; 631 632 /* Free Rx skb ringbuffer */ 633 if (mdp->rx_skbuff) { 634 for (i = 0; i < RX_RING_SIZE; i++) { 635 if (mdp->rx_skbuff[i]) 636 dev_kfree_skb(mdp->rx_skbuff[i]); 637 } 638 } 
639 kfree(mdp->rx_skbuff); 640 641 /* Free Tx skb ringbuffer */ 642 if (mdp->tx_skbuff) { 643 for (i = 0; i < TX_RING_SIZE; i++) { 644 if (mdp->tx_skbuff[i]) 645 dev_kfree_skb(mdp->tx_skbuff[i]); 646 } 647 } 648 kfree(mdp->tx_skbuff); 649 } 650 651 /* format skb and descriptor buffer */ 652 static void sh_eth_ring_format(struct net_device *ndev) 653 { 654 struct sh_eth_private *mdp = netdev_priv(ndev); 655 int i; 656 struct sk_buff *skb; 657 struct sh_eth_rxdesc *rxdesc = NULL; 658 struct sh_eth_txdesc *txdesc = NULL; 659 int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE; 660 int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE; 661 662 mdp->cur_rx = mdp->cur_tx = 0; 663 mdp->dirty_rx = mdp->dirty_tx = 0; 664 665 memset(mdp->rx_ring, 0, rx_ringsize); 666 667 /* build Rx ring buffer */ 668 for (i = 0; i < RX_RING_SIZE; i++) { 669 /* skb */ 670 mdp->rx_skbuff[i] = NULL; 671 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 672 mdp->rx_skbuff[i] = skb; 673 if (skb == NULL) 674 break; 675 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, 676 DMA_FROM_DEVICE); 677 sh_eth_set_receive_align(skb); 678 679 /* RX descriptor */ 680 rxdesc = &mdp->rx_ring[i]; 681 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 682 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 683 684 /* The size of the buffer is 16 byte boundary. */ 685 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 686 /* Rx descriptor address set */ 687 if (i == 0) { 688 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); 689 if (sh_eth_is_gether(mdp)) 690 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); 691 } 692 } 693 694 mdp->dirty_rx = (u32) (i - RX_RING_SIZE); 695 696 /* Mark the last entry as wrapping the ring. */ 697 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL); 698 699 memset(mdp->tx_ring, 0, tx_ringsize); 700 701 /* build Tx ring buffer */ 702 for (i = 0; i < TX_RING_SIZE; i++) { 703 mdp->tx_skbuff[i] = NULL; 704 txdesc = &mdp->tx_ring[i]; 705 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 706 txdesc->buffer_length = 0; 707 if (i == 0) { 708 /* Tx descriptor address set */ 709 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); 710 if (sh_eth_is_gether(mdp)) 711 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); 712 } 713 } 714 715 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 716 } 717 718 /* Get skb and descriptor buffer */ 719 static int sh_eth_ring_init(struct net_device *ndev) 720 { 721 struct sh_eth_private *mdp = netdev_priv(ndev); 722 int rx_ringsize, tx_ringsize, ret = 0; 723 724 /* 725 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the 726 * card needs room to do 8 byte alignment, +2 so we can reserve 727 * the first 2 bytes, and +16 gets room for the status word from the 728 * card. 729 */ 730 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : 731 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16)); 732 if (mdp->cd->rpadir) 733 mdp->rx_buf_sz += NET_IP_ALIGN; 734 735 /* Allocate RX and TX skb rings */ 736 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE, 737 GFP_KERNEL); 738 if (!mdp->rx_skbuff) { 739 dev_err(&ndev->dev, "Cannot allocate Rx skb\n"); 740 ret = -ENOMEM; 741 return ret; 742 } 743 744 mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE, 745 GFP_KERNEL); 746 if (!mdp->tx_skbuff) { 747 dev_err(&ndev->dev, "Cannot allocate Tx skb\n"); 748 ret = -ENOMEM; 749 goto skb_ring_free; 750 } 751 752 /* Allocate all Rx descriptors. 
*/ 753 rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE; 754 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, 755 GFP_KERNEL); 756 757 if (!mdp->rx_ring) { 758 dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n", 759 rx_ringsize); 760 ret = -ENOMEM; 761 goto desc_ring_free; 762 } 763 764 mdp->dirty_rx = 0; 765 766 /* Allocate all Tx descriptors. */ 767 tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE; 768 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, 769 GFP_KERNEL); 770 if (!mdp->tx_ring) { 771 dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n", 772 tx_ringsize); 773 ret = -ENOMEM; 774 goto desc_ring_free; 775 } 776 return ret; 777 778 desc_ring_free: 779 /* free DMA buffer */ 780 dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma); 781 782 skb_ring_free: 783 /* Free Rx and Tx skb ring buffer */ 784 sh_eth_ring_free(ndev); 785 786 return ret; 787 } 788 789 static int sh_eth_dev_init(struct net_device *ndev) 790 { 791 int ret = 0; 792 struct sh_eth_private *mdp = netdev_priv(ndev); 793 u_int32_t rx_int_var, tx_int_var; 794 u32 val; 795 796 /* Soft Reset */ 797 sh_eth_reset(ndev); 798 799 /* Descriptor format */ 800 sh_eth_ring_format(ndev); 801 if (mdp->cd->rpadir) 802 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); 803 804 /* all sh_eth int mask */ 805 sh_eth_write(ndev, 0, EESIPR); 806 807 #if defined(__LITTLE_ENDIAN) 808 if (mdp->cd->hw_swap) 809 sh_eth_write(ndev, EDMR_EL, EDMR); 810 else 811 #endif 812 sh_eth_write(ndev, 0, EDMR); 813 814 /* FIFO size set */ 815 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); 816 sh_eth_write(ndev, 0, TFTR); 817 818 /* Frame recv control */ 819 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); 820 821 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; 822 tx_int_var = mdp->tx_int_var = DESC_I_TINT2; 823 sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER); 824 825 if (mdp->cd->bculr) 826 sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ 827 828 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); 829 830 if (!mdp->cd->no_trimd) 831 sh_eth_write(ndev, 0, TRIMD); 832 833 /* Recv frame limit set register */ 834 sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, 835 RFLR); 836 837 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); 838 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 839 840 /* PAUSE Prohibition */ 841 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | 842 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; 843 844 sh_eth_write(ndev, val, ECMR); 845 846 if (mdp->cd->set_rate) 847 mdp->cd->set_rate(ndev); 848 849 /* E-MAC Status Register clear */ 850 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); 851 852 /* E-MAC Interrupt Enable register */ 853 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); 854 855 /* Set MAC address */ 856 update_mac_address(ndev); 857 858 /* mask reset */ 859 if (mdp->cd->apr) 860 sh_eth_write(ndev, APR_AP, APR); 861 if (mdp->cd->mpr) 862 sh_eth_write(ndev, MPR_MP, MPR); 863 if (mdp->cd->tpauser) 864 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); 865 866 /* Setting the Rx mode will start the Rx process. 
*/ 867 sh_eth_write(ndev, EDRRR_R, EDRRR); 868 869 netif_start_queue(ndev); 870 871 return ret; 872 } 873 874 /* free Tx skb function */ 875 static int sh_eth_txfree(struct net_device *ndev) 876 { 877 struct sh_eth_private *mdp = netdev_priv(ndev); 878 struct sh_eth_txdesc *txdesc; 879 int freeNum = 0; 880 int entry = 0; 881 882 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 883 entry = mdp->dirty_tx % TX_RING_SIZE; 884 txdesc = &mdp->tx_ring[entry]; 885 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 886 break; 887 /* Free the original skb. */ 888 if (mdp->tx_skbuff[entry]) { 889 dma_unmap_single(&ndev->dev, txdesc->addr, 890 txdesc->buffer_length, DMA_TO_DEVICE); 891 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 892 mdp->tx_skbuff[entry] = NULL; 893 freeNum++; 894 } 895 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 896 if (entry >= TX_RING_SIZE - 1) 897 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 898 899 ndev->stats.tx_packets++; 900 ndev->stats.tx_bytes += txdesc->buffer_length; 901 } 902 return freeNum; 903 } 904 905 /* Packet receive function */ 906 static int sh_eth_rx(struct net_device *ndev) 907 { 908 struct sh_eth_private *mdp = netdev_priv(ndev); 909 struct sh_eth_rxdesc *rxdesc; 910 911 int entry = mdp->cur_rx % RX_RING_SIZE; 912 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx; 913 struct sk_buff *skb; 914 u16 pkt_len = 0; 915 u32 desc_status; 916 917 rxdesc = &mdp->rx_ring[entry]; 918 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 919 desc_status = edmac_to_cpu(mdp, rxdesc->status); 920 pkt_len = rxdesc->frame_length; 921 922 if (--boguscnt < 0) 923 break; 924 925 if (!(desc_status & RDFEND)) 926 ndev->stats.rx_length_errors++; 927 928 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 929 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 930 ndev->stats.rx_errors++; 931 if (desc_status & RD_RFS1) 932 ndev->stats.rx_crc_errors++; 933 if (desc_status & RD_RFS2) 934 ndev->stats.rx_frame_errors++; 935 if (desc_status & RD_RFS3) 936 ndev->stats.rx_length_errors++; 937 if (desc_status & RD_RFS4) 938 ndev->stats.rx_length_errors++; 939 if (desc_status & RD_RFS6) 940 ndev->stats.rx_missed_errors++; 941 if (desc_status & RD_RFS10) 942 ndev->stats.rx_over_errors++; 943 } else { 944 if (!mdp->cd->hw_swap) 945 sh_eth_soft_swap( 946 phys_to_virt(ALIGN(rxdesc->addr, 4)), 947 pkt_len + 2); 948 skb = mdp->rx_skbuff[entry]; 949 mdp->rx_skbuff[entry] = NULL; 950 if (mdp->cd->rpadir) 951 skb_reserve(skb, NET_IP_ALIGN); 952 skb_put(skb, pkt_len); 953 skb->protocol = eth_type_trans(skb, ndev); 954 netif_rx(skb); 955 ndev->stats.rx_packets++; 956 ndev->stats.rx_bytes += pkt_len; 957 } 958 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT); 959 entry = (++mdp->cur_rx) % RX_RING_SIZE; 960 rxdesc = &mdp->rx_ring[entry]; 961 } 962 963 /* Refill the Rx ring buffers. */ 964 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { 965 entry = mdp->dirty_rx % RX_RING_SIZE; 966 rxdesc = &mdp->rx_ring[entry]; 967 /* The size of the buffer is 16 byte boundary. */ 968 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 969 970 if (mdp->rx_skbuff[entry] == NULL) { 971 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 972 mdp->rx_skbuff[entry] = skb; 973 if (skb == NULL) 974 break; /* Better luck next round. 
*/ 975 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, 976 DMA_FROM_DEVICE); 977 sh_eth_set_receive_align(skb); 978 979 skb_checksum_none_assert(skb); 980 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 981 } 982 if (entry >= RX_RING_SIZE - 1) 983 rxdesc->status |= 984 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 985 else 986 rxdesc->status |= 987 cpu_to_edmac(mdp, RD_RACT | RD_RFP); 988 } 989 990 /* Restart Rx engine if stopped. */ 991 /* If we don't need to check status, don't. -KDU */ 992 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) 993 sh_eth_write(ndev, EDRRR_R, EDRRR); 994 995 return 0; 996 } 997 998 static void sh_eth_rcv_snd_disable(struct net_device *ndev) 999 { 1000 /* disable tx and rx */ 1001 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & 1002 ~(ECMR_RE | ECMR_TE), ECMR); 1003 } 1004 1005 static void sh_eth_rcv_snd_enable(struct net_device *ndev) 1006 { 1007 /* enable tx and rx */ 1008 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | 1009 (ECMR_RE | ECMR_TE), ECMR); 1010 } 1011 1012 /* error control function */ 1013 static void sh_eth_error(struct net_device *ndev, int intr_status) 1014 { 1015 struct sh_eth_private *mdp = netdev_priv(ndev); 1016 u32 felic_stat; 1017 u32 link_stat; 1018 u32 mask; 1019 1020 if (intr_status & EESR_ECI) { 1021 felic_stat = sh_eth_read(ndev, ECSR); 1022 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ 1023 if (felic_stat & ECSR_ICD) 1024 ndev->stats.tx_carrier_errors++; 1025 if (felic_stat & ECSR_LCHNG) { 1026 /* Link Changed */ 1027 if (mdp->cd->no_psr || mdp->no_ether_link) { 1028 if (mdp->link == PHY_DOWN) 1029 link_stat = 0; 1030 else 1031 link_stat = PHY_ST_LINK; 1032 } else { 1033 link_stat = (sh_eth_read(ndev, PSR)); 1034 if (mdp->ether_link_active_low) 1035 link_stat = ~link_stat; 1036 } 1037 if (!(link_stat & PHY_ST_LINK)) 1038 sh_eth_rcv_snd_disable(ndev); 1039 else { 1040 /* Link Up */ 1041 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) & 1042 ~DMAC_M_ECI, EESIPR); 1043 /*clear int */ 1044 sh_eth_write(ndev, sh_eth_read(ndev, ECSR), 1045 ECSR); 1046 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) | 1047 DMAC_M_ECI, EESIPR); 1048 /* enable tx and rx */ 1049 sh_eth_rcv_snd_enable(ndev); 1050 } 1051 } 1052 } 1053 1054 if (intr_status & EESR_TWB) { 1055 /* Write buck end. 
unused write back interrupt */ 1056 if (intr_status & EESR_TABT) /* Transmit Abort int */ 1057 ndev->stats.tx_aborted_errors++; 1058 if (netif_msg_tx_err(mdp)) 1059 dev_err(&ndev->dev, "Transmit Abort\n"); 1060 } 1061 1062 if (intr_status & EESR_RABT) { 1063 /* Receive Abort int */ 1064 if (intr_status & EESR_RFRMER) { 1065 /* Receive Frame Overflow int */ 1066 ndev->stats.rx_frame_errors++; 1067 if (netif_msg_rx_err(mdp)) 1068 dev_err(&ndev->dev, "Receive Abort\n"); 1069 } 1070 } 1071 1072 if (intr_status & EESR_TDE) { 1073 /* Transmit Descriptor Empty int */ 1074 ndev->stats.tx_fifo_errors++; 1075 if (netif_msg_tx_err(mdp)) 1076 dev_err(&ndev->dev, "Transmit Descriptor Empty\n"); 1077 } 1078 1079 if (intr_status & EESR_TFE) { 1080 /* FIFO under flow */ 1081 ndev->stats.tx_fifo_errors++; 1082 if (netif_msg_tx_err(mdp)) 1083 dev_err(&ndev->dev, "Transmit FIFO Under flow\n"); 1084 } 1085 1086 if (intr_status & EESR_RDE) { 1087 /* Receive Descriptor Empty int */ 1088 ndev->stats.rx_over_errors++; 1089 1090 if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R) 1091 sh_eth_write(ndev, EDRRR_R, EDRRR); 1092 if (netif_msg_rx_err(mdp)) 1093 dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 1094 } 1095 1096 if (intr_status & EESR_RFE) { 1097 /* Receive FIFO Overflow int */ 1098 ndev->stats.rx_fifo_errors++; 1099 if (netif_msg_rx_err(mdp)) 1100 dev_err(&ndev->dev, "Receive FIFO Overflow\n"); 1101 } 1102 1103 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { 1104 /* Address Error */ 1105 ndev->stats.tx_fifo_errors++; 1106 if (netif_msg_tx_err(mdp)) 1107 dev_err(&ndev->dev, "Address Error\n"); 1108 } 1109 1110 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; 1111 if (mdp->cd->no_ade) 1112 mask &= ~EESR_ADE; 1113 if (intr_status & mask) { 1114 /* Tx error */ 1115 u32 edtrr = sh_eth_read(ndev, EDTRR); 1116 /* dmesg */ 1117 dev_err(&ndev->dev, "TX error. 
status=%8.8x cur_tx=%8.8x ", 1118 intr_status, mdp->cur_tx); 1119 dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", 1120 mdp->dirty_tx, (u32) ndev->state, edtrr); 1121 /* dirty buffer free */ 1122 sh_eth_txfree(ndev); 1123 1124 /* SH7712 BUG */ 1125 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { 1126 /* tx dma start */ 1127 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); 1128 } 1129 /* wakeup */ 1130 netif_wake_queue(ndev); 1131 } 1132 } 1133 1134 static irqreturn_t sh_eth_interrupt(int irq, void *netdev) 1135 { 1136 struct net_device *ndev = netdev; 1137 struct sh_eth_private *mdp = netdev_priv(ndev); 1138 struct sh_eth_cpu_data *cd = mdp->cd; 1139 irqreturn_t ret = IRQ_NONE; 1140 u32 intr_status = 0; 1141 1142 spin_lock(&mdp->lock); 1143 1144 /* Get interrpt stat */ 1145 intr_status = sh_eth_read(ndev, EESR); 1146 /* Clear interrupt */ 1147 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | 1148 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | 1149 cd->tx_check | cd->eesr_err_check)) { 1150 sh_eth_write(ndev, intr_status, EESR); 1151 ret = IRQ_HANDLED; 1152 } else 1153 goto other_irq; 1154 1155 if (intr_status & (EESR_FRC | /* Frame recv*/ 1156 EESR_RMAF | /* Multi cast address recv*/ 1157 EESR_RRF | /* Bit frame recv */ 1158 EESR_RTLF | /* Long frame recv*/ 1159 EESR_RTSF | /* short frame recv */ 1160 EESR_PRE | /* PHY-LSI recv error */ 1161 EESR_CERF)){ /* recv frame CRC error */ 1162 sh_eth_rx(ndev); 1163 } 1164 1165 /* Tx Check */ 1166 if (intr_status & cd->tx_check) { 1167 sh_eth_txfree(ndev); 1168 netif_wake_queue(ndev); 1169 } 1170 1171 if (intr_status & cd->eesr_err_check) 1172 sh_eth_error(ndev, intr_status); 1173 1174 other_irq: 1175 spin_unlock(&mdp->lock); 1176 1177 return ret; 1178 } 1179 1180 static void sh_eth_timer(unsigned long data) 1181 { 1182 struct net_device *ndev = (struct net_device *)data; 1183 struct sh_eth_private *mdp = netdev_priv(ndev); 1184 1185 mod_timer(&mdp->timer, jiffies + (10 * HZ)); 1186 } 1187 1188 /* PHY state control function */ 1189 static void sh_eth_adjust_link(struct net_device *ndev) 1190 { 1191 struct sh_eth_private *mdp = netdev_priv(ndev); 1192 struct phy_device *phydev = mdp->phydev; 1193 int new_state = 0; 1194 1195 if (phydev->link != PHY_DOWN) { 1196 if (phydev->duplex != mdp->duplex) { 1197 new_state = 1; 1198 mdp->duplex = phydev->duplex; 1199 if (mdp->cd->set_duplex) 1200 mdp->cd->set_duplex(ndev); 1201 } 1202 1203 if (phydev->speed != mdp->speed) { 1204 new_state = 1; 1205 mdp->speed = phydev->speed; 1206 if (mdp->cd->set_rate) 1207 mdp->cd->set_rate(ndev); 1208 } 1209 if (mdp->link == PHY_DOWN) { 1210 sh_eth_write(ndev, 1211 (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); 1212 new_state = 1; 1213 mdp->link = phydev->link; 1214 } 1215 } else if (mdp->link) { 1216 new_state = 1; 1217 mdp->link = PHY_DOWN; 1218 mdp->speed = 0; 1219 mdp->duplex = -1; 1220 } 1221 1222 if (new_state && netif_msg_link(mdp)) 1223 phy_print_status(phydev); 1224 } 1225 1226 /* PHY init function */ 1227 static int sh_eth_phy_init(struct net_device *ndev) 1228 { 1229 struct sh_eth_private *mdp = netdev_priv(ndev); 1230 char phy_id[MII_BUS_ID_SIZE + 3]; 1231 struct phy_device *phydev = NULL; 1232 1233 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 1234 mdp->mii_bus->id , mdp->phy_id); 1235 1236 mdp->link = PHY_DOWN; 1237 mdp->speed = 0; 1238 mdp->duplex = -1; 1239 1240 /* Try connect to PHY */ 1241 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, 1242 0, mdp->phy_interface); 1243 if (IS_ERR(phydev)) { 1244 dev_err(&ndev->dev, "phy_connect 
failed\n"); 1245 return PTR_ERR(phydev); 1246 } 1247 1248 dev_info(&ndev->dev, "attached phy %i to driver %s\n", 1249 phydev->addr, phydev->drv->name); 1250 1251 mdp->phydev = phydev; 1252 1253 return 0; 1254 } 1255 1256 /* PHY control start function */ 1257 static int sh_eth_phy_start(struct net_device *ndev) 1258 { 1259 struct sh_eth_private *mdp = netdev_priv(ndev); 1260 int ret; 1261 1262 ret = sh_eth_phy_init(ndev); 1263 if (ret) 1264 return ret; 1265 1266 /* reset phy - this also wakes it from PDOWN */ 1267 phy_write(mdp->phydev, MII_BMCR, BMCR_RESET); 1268 phy_start(mdp->phydev); 1269 1270 return 0; 1271 } 1272 1273 static int sh_eth_get_settings(struct net_device *ndev, 1274 struct ethtool_cmd *ecmd) 1275 { 1276 struct sh_eth_private *mdp = netdev_priv(ndev); 1277 unsigned long flags; 1278 int ret; 1279 1280 spin_lock_irqsave(&mdp->lock, flags); 1281 ret = phy_ethtool_gset(mdp->phydev, ecmd); 1282 spin_unlock_irqrestore(&mdp->lock, flags); 1283 1284 return ret; 1285 } 1286 1287 static int sh_eth_set_settings(struct net_device *ndev, 1288 struct ethtool_cmd *ecmd) 1289 { 1290 struct sh_eth_private *mdp = netdev_priv(ndev); 1291 unsigned long flags; 1292 int ret; 1293 1294 spin_lock_irqsave(&mdp->lock, flags); 1295 1296 /* disable tx and rx */ 1297 sh_eth_rcv_snd_disable(ndev); 1298 1299 ret = phy_ethtool_sset(mdp->phydev, ecmd); 1300 if (ret) 1301 goto error_exit; 1302 1303 if (ecmd->duplex == DUPLEX_FULL) 1304 mdp->duplex = 1; 1305 else 1306 mdp->duplex = 0; 1307 1308 if (mdp->cd->set_duplex) 1309 mdp->cd->set_duplex(ndev); 1310 1311 error_exit: 1312 mdelay(1); 1313 1314 /* enable tx and rx */ 1315 sh_eth_rcv_snd_enable(ndev); 1316 1317 spin_unlock_irqrestore(&mdp->lock, flags); 1318 1319 return ret; 1320 } 1321 1322 static int sh_eth_nway_reset(struct net_device *ndev) 1323 { 1324 struct sh_eth_private *mdp = netdev_priv(ndev); 1325 unsigned long flags; 1326 int ret; 1327 1328 spin_lock_irqsave(&mdp->lock, flags); 1329 ret = phy_start_aneg(mdp->phydev); 1330 spin_unlock_irqrestore(&mdp->lock, flags); 1331 1332 return ret; 1333 } 1334 1335 static u32 sh_eth_get_msglevel(struct net_device *ndev) 1336 { 1337 struct sh_eth_private *mdp = netdev_priv(ndev); 1338 return mdp->msg_enable; 1339 } 1340 1341 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value) 1342 { 1343 struct sh_eth_private *mdp = netdev_priv(ndev); 1344 mdp->msg_enable = value; 1345 } 1346 1347 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = { 1348 "rx_current", "tx_current", 1349 "rx_dirty", "tx_dirty", 1350 }; 1351 #define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats) 1352 1353 static int sh_eth_get_sset_count(struct net_device *netdev, int sset) 1354 { 1355 switch (sset) { 1356 case ETH_SS_STATS: 1357 return SH_ETH_STATS_LEN; 1358 default: 1359 return -EOPNOTSUPP; 1360 } 1361 } 1362 1363 static void sh_eth_get_ethtool_stats(struct net_device *ndev, 1364 struct ethtool_stats *stats, u64 *data) 1365 { 1366 struct sh_eth_private *mdp = netdev_priv(ndev); 1367 int i = 0; 1368 1369 /* device-specific stats */ 1370 data[i++] = mdp->cur_rx; 1371 data[i++] = mdp->cur_tx; 1372 data[i++] = mdp->dirty_rx; 1373 data[i++] = mdp->dirty_tx; 1374 } 1375 1376 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) 1377 { 1378 switch (stringset) { 1379 case ETH_SS_STATS: 1380 memcpy(data, *sh_eth_gstrings_stats, 1381 sizeof(sh_eth_gstrings_stats)); 1382 break; 1383 } 1384 } 1385 1386 static const struct ethtool_ops sh_eth_ethtool_ops = { 1387 .get_settings = sh_eth_get_settings, 
1388 .set_settings = sh_eth_set_settings, 1389 .nway_reset = sh_eth_nway_reset, 1390 .get_msglevel = sh_eth_get_msglevel, 1391 .set_msglevel = sh_eth_set_msglevel, 1392 .get_link = ethtool_op_get_link, 1393 .get_strings = sh_eth_get_strings, 1394 .get_ethtool_stats = sh_eth_get_ethtool_stats, 1395 .get_sset_count = sh_eth_get_sset_count, 1396 }; 1397 1398 /* network device open function */ 1399 static int sh_eth_open(struct net_device *ndev) 1400 { 1401 int ret = 0; 1402 struct sh_eth_private *mdp = netdev_priv(ndev); 1403 1404 pm_runtime_get_sync(&mdp->pdev->dev); 1405 1406 ret = request_irq(ndev->irq, sh_eth_interrupt, 1407 #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 1408 defined(CONFIG_CPU_SUBTYPE_SH7764) || \ 1409 defined(CONFIG_CPU_SUBTYPE_SH7757) 1410 IRQF_SHARED, 1411 #else 1412 0, 1413 #endif 1414 ndev->name, ndev); 1415 if (ret) { 1416 dev_err(&ndev->dev, "Can not assign IRQ number\n"); 1417 return ret; 1418 } 1419 1420 /* Descriptor set */ 1421 ret = sh_eth_ring_init(ndev); 1422 if (ret) 1423 goto out_free_irq; 1424 1425 /* device init */ 1426 ret = sh_eth_dev_init(ndev); 1427 if (ret) 1428 goto out_free_irq; 1429 1430 /* PHY control start*/ 1431 ret = sh_eth_phy_start(ndev); 1432 if (ret) 1433 goto out_free_irq; 1434 1435 /* Set the timer to check for link beat. */ 1436 init_timer(&mdp->timer); 1437 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */ 1438 setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev); 1439 1440 return ret; 1441 1442 out_free_irq: 1443 free_irq(ndev->irq, ndev); 1444 pm_runtime_put_sync(&mdp->pdev->dev); 1445 return ret; 1446 } 1447 1448 /* Timeout function */ 1449 static void sh_eth_tx_timeout(struct net_device *ndev) 1450 { 1451 struct sh_eth_private *mdp = netdev_priv(ndev); 1452 struct sh_eth_rxdesc *rxdesc; 1453 int i; 1454 1455 netif_stop_queue(ndev); 1456 1457 if (netif_msg_timer(mdp)) 1458 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x," 1459 " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR)); 1460 1461 /* tx_errors count up */ 1462 ndev->stats.tx_errors++; 1463 1464 /* timer off */ 1465 del_timer_sync(&mdp->timer); 1466 1467 /* Free all the skbuffs in the Rx queue. */ 1468 for (i = 0; i < RX_RING_SIZE; i++) { 1469 rxdesc = &mdp->rx_ring[i]; 1470 rxdesc->status = 0; 1471 rxdesc->addr = 0xBADF00D0; 1472 if (mdp->rx_skbuff[i]) 1473 dev_kfree_skb(mdp->rx_skbuff[i]); 1474 mdp->rx_skbuff[i] = NULL; 1475 } 1476 for (i = 0; i < TX_RING_SIZE; i++) { 1477 if (mdp->tx_skbuff[i]) 1478 dev_kfree_skb(mdp->tx_skbuff[i]); 1479 mdp->tx_skbuff[i] = NULL; 1480 } 1481 1482 /* device init */ 1483 sh_eth_dev_init(ndev); 1484 1485 /* timer on */ 1486 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */ 1487 add_timer(&mdp->timer); 1488 } 1489 1490 /* Packet transmit function */ 1491 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) 1492 { 1493 struct sh_eth_private *mdp = netdev_priv(ndev); 1494 struct sh_eth_txdesc *txdesc; 1495 u32 entry; 1496 unsigned long flags; 1497 1498 spin_lock_irqsave(&mdp->lock, flags); 1499 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { 1500 if (!sh_eth_txfree(ndev)) { 1501 if (netif_msg_tx_queued(mdp)) 1502 dev_warn(&ndev->dev, "TxFD exhausted.\n"); 1503 netif_stop_queue(ndev); 1504 spin_unlock_irqrestore(&mdp->lock, flags); 1505 return NETDEV_TX_BUSY; 1506 } 1507 } 1508 spin_unlock_irqrestore(&mdp->lock, flags); 1509 1510 entry = mdp->cur_tx % TX_RING_SIZE; 1511 mdp->tx_skbuff[entry] = skb; 1512 txdesc = &mdp->tx_ring[entry]; 1513 /* soft swap. 
*/ 1514 if (!mdp->cd->hw_swap) 1515 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), 1516 skb->len + 2); 1517 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, 1518 DMA_TO_DEVICE); 1519 if (skb->len < ETHERSMALL) 1520 txdesc->buffer_length = ETHERSMALL; 1521 else 1522 txdesc->buffer_length = skb->len; 1523 1524 if (entry >= TX_RING_SIZE - 1) 1525 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 1526 else 1527 txdesc->status |= cpu_to_edmac(mdp, TD_TACT); 1528 1529 mdp->cur_tx++; 1530 1531 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp))) 1532 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); 1533 1534 return NETDEV_TX_OK; 1535 } 1536 1537 /* device close function */ 1538 static int sh_eth_close(struct net_device *ndev) 1539 { 1540 struct sh_eth_private *mdp = netdev_priv(ndev); 1541 int ringsize; 1542 1543 netif_stop_queue(ndev); 1544 1545 /* Disable interrupts by clearing the interrupt mask. */ 1546 sh_eth_write(ndev, 0x0000, EESIPR); 1547 1548 /* Stop the chip's Tx and Rx processes. */ 1549 sh_eth_write(ndev, 0, EDTRR); 1550 sh_eth_write(ndev, 0, EDRRR); 1551 1552 /* PHY Disconnect */ 1553 if (mdp->phydev) { 1554 phy_stop(mdp->phydev); 1555 phy_disconnect(mdp->phydev); 1556 } 1557 1558 free_irq(ndev->irq, ndev); 1559 1560 del_timer_sync(&mdp->timer); 1561 1562 /* Free all the skbuffs in the Rx queue. */ 1563 sh_eth_ring_free(ndev); 1564 1565 /* free DMA buffer */ 1566 ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE; 1567 dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma); 1568 1569 /* free DMA buffer */ 1570 ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE; 1571 dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma); 1572 1573 pm_runtime_put_sync(&mdp->pdev->dev); 1574 1575 return 0; 1576 } 1577 1578 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) 1579 { 1580 struct sh_eth_private *mdp = netdev_priv(ndev); 1581 1582 pm_runtime_get_sync(&mdp->pdev->dev); 1583 1584 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR); 1585 sh_eth_write(ndev, 0, TROCR); /* (write clear) */ 1586 ndev->stats.collisions += sh_eth_read(ndev, CDCR); 1587 sh_eth_write(ndev, 0, CDCR); /* (write clear) */ 1588 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); 1589 sh_eth_write(ndev, 0, LCCR); /* (write clear) */ 1590 if (sh_eth_is_gether(mdp)) { 1591 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); 1592 sh_eth_write(ndev, 0, CERCR); /* (write clear) */ 1593 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); 1594 sh_eth_write(ndev, 0, CEECR); /* (write clear) */ 1595 } else { 1596 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); 1597 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ 1598 } 1599 pm_runtime_put_sync(&mdp->pdev->dev); 1600 1601 return &ndev->stats; 1602 } 1603 1604 /* ioctl to device function */ 1605 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, 1606 int cmd) 1607 { 1608 struct sh_eth_private *mdp = netdev_priv(ndev); 1609 struct phy_device *phydev = mdp->phydev; 1610 1611 if (!netif_running(ndev)) 1612 return -EINVAL; 1613 1614 if (!phydev) 1615 return -ENODEV; 1616 1617 return phy_mii_ioctl(phydev, rq, cmd); 1618 } 1619 1620 #if defined(SH_ETH_HAS_TSU) 1621 /* For TSU_POSTn. 
Please refer to the manual about this (strange) bitfields */ 1622 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, 1623 int entry) 1624 { 1625 return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4); 1626 } 1627 1628 static u32 sh_eth_tsu_get_post_mask(int entry) 1629 { 1630 return 0x0f << (28 - ((entry % 8) * 4)); 1631 } 1632 1633 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry) 1634 { 1635 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); 1636 } 1637 1638 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev, 1639 int entry) 1640 { 1641 struct sh_eth_private *mdp = netdev_priv(ndev); 1642 u32 tmp; 1643 void *reg_offset; 1644 1645 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); 1646 tmp = ioread32(reg_offset); 1647 iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset); 1648 } 1649 1650 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev, 1651 int entry) 1652 { 1653 struct sh_eth_private *mdp = netdev_priv(ndev); 1654 u32 post_mask, ref_mask, tmp; 1655 void *reg_offset; 1656 1657 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); 1658 post_mask = sh_eth_tsu_get_post_mask(entry); 1659 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; 1660 1661 tmp = ioread32(reg_offset); 1662 iowrite32(tmp & ~post_mask, reg_offset); 1663 1664 /* If other port enables, the function returns "true" */ 1665 return tmp & ref_mask; 1666 } 1667 1668 static int sh_eth_tsu_busy(struct net_device *ndev) 1669 { 1670 int timeout = SH_ETH_TSU_TIMEOUT_MS * 100; 1671 struct sh_eth_private *mdp = netdev_priv(ndev); 1672 1673 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) { 1674 udelay(10); 1675 timeout--; 1676 if (timeout <= 0) { 1677 dev_err(&ndev->dev, "%s: timeout\n", __func__); 1678 return -ETIMEDOUT; 1679 } 1680 } 1681 1682 return 0; 1683 } 1684 1685 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg, 1686 const u8 *addr) 1687 { 1688 u32 val; 1689 1690 val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3]; 1691 iowrite32(val, reg); 1692 if (sh_eth_tsu_busy(ndev) < 0) 1693 return -EBUSY; 1694 1695 val = addr[4] << 8 | addr[5]; 1696 iowrite32(val, reg + 4); 1697 if (sh_eth_tsu_busy(ndev) < 0) 1698 return -EBUSY; 1699 1700 return 0; 1701 } 1702 1703 static void sh_eth_tsu_read_entry(void *reg, u8 *addr) 1704 { 1705 u32 val; 1706 1707 val = ioread32(reg); 1708 addr[0] = (val >> 24) & 0xff; 1709 addr[1] = (val >> 16) & 0xff; 1710 addr[2] = (val >> 8) & 0xff; 1711 addr[3] = val & 0xff; 1712 val = ioread32(reg + 4); 1713 addr[4] = (val >> 8) & 0xff; 1714 addr[5] = val & 0xff; 1715 } 1716 1717 1718 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr) 1719 { 1720 struct sh_eth_private *mdp = netdev_priv(ndev); 1721 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 1722 int i; 1723 u8 c_addr[ETH_ALEN]; 1724 1725 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { 1726 sh_eth_tsu_read_entry(reg_offset, c_addr); 1727 if (memcmp(addr, c_addr, ETH_ALEN) == 0) 1728 return i; 1729 } 1730 1731 return -ENOENT; 1732 } 1733 1734 static int sh_eth_tsu_find_empty(struct net_device *ndev) 1735 { 1736 u8 blank[ETH_ALEN]; 1737 int entry; 1738 1739 memset(blank, 0, sizeof(blank)); 1740 entry = sh_eth_tsu_find_entry(ndev, blank); 1741 return (entry < 0) ? 
-ENOMEM : entry; 1742 } 1743 1744 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev, 1745 int entry) 1746 { 1747 struct sh_eth_private *mdp = netdev_priv(ndev); 1748 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 1749 int ret; 1750 u8 blank[ETH_ALEN]; 1751 1752 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) & 1753 ~(1 << (31 - entry)), TSU_TEN); 1754 1755 memset(blank, 0, sizeof(blank)); 1756 ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank); 1757 if (ret < 0) 1758 return ret; 1759 return 0; 1760 } 1761 1762 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr) 1763 { 1764 struct sh_eth_private *mdp = netdev_priv(ndev); 1765 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 1766 int i, ret; 1767 1768 if (!mdp->cd->tsu) 1769 return 0; 1770 1771 i = sh_eth_tsu_find_entry(ndev, addr); 1772 if (i < 0) { 1773 /* No entry found, create one */ 1774 i = sh_eth_tsu_find_empty(ndev); 1775 if (i < 0) 1776 return -ENOMEM; 1777 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr); 1778 if (ret < 0) 1779 return ret; 1780 1781 /* Enable the entry */ 1782 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) | 1783 (1 << (31 - i)), TSU_TEN); 1784 } 1785 1786 /* Entry found or created, enable POST */ 1787 sh_eth_tsu_enable_cam_entry_post(ndev, i); 1788 1789 return 0; 1790 } 1791 1792 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr) 1793 { 1794 struct sh_eth_private *mdp = netdev_priv(ndev); 1795 int i, ret; 1796 1797 if (!mdp->cd->tsu) 1798 return 0; 1799 1800 i = sh_eth_tsu_find_entry(ndev, addr); 1801 if (i) { 1802 /* Entry found */ 1803 if (sh_eth_tsu_disable_cam_entry_post(ndev, i)) 1804 goto done; 1805 1806 /* Disable the entry if both ports was disabled */ 1807 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i); 1808 if (ret < 0) 1809 return ret; 1810 } 1811 done: 1812 return 0; 1813 } 1814 1815 static int sh_eth_tsu_purge_all(struct net_device *ndev) 1816 { 1817 struct sh_eth_private *mdp = netdev_priv(ndev); 1818 int i, ret; 1819 1820 if (unlikely(!mdp->cd->tsu)) 1821 return 0; 1822 1823 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) { 1824 if (sh_eth_tsu_disable_cam_entry_post(ndev, i)) 1825 continue; 1826 1827 /* Disable the entry if both ports was disabled */ 1828 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i); 1829 if (ret < 0) 1830 return ret; 1831 } 1832 1833 return 0; 1834 } 1835 1836 static void sh_eth_tsu_purge_mcast(struct net_device *ndev) 1837 { 1838 struct sh_eth_private *mdp = netdev_priv(ndev); 1839 u8 addr[ETH_ALEN]; 1840 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 1841 int i; 1842 1843 if (unlikely(!mdp->cd->tsu)) 1844 return; 1845 1846 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { 1847 sh_eth_tsu_read_entry(reg_offset, addr); 1848 if (is_multicast_ether_addr(addr)) 1849 sh_eth_tsu_del_entry(ndev, addr); 1850 } 1851 } 1852 1853 /* Multicast reception directions set */ 1854 static void sh_eth_set_multicast_list(struct net_device *ndev) 1855 { 1856 struct sh_eth_private *mdp = netdev_priv(ndev); 1857 u32 ecmr_bits; 1858 int mcast_all = 0; 1859 unsigned long flags; 1860 1861 spin_lock_irqsave(&mdp->lock, flags); 1862 /* 1863 * Initial condition is MCT = 1, PRM = 0. 
1864 * Depending on ndev->flags, set PRM or clear MCT 1865 */ 1866 ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT; 1867 1868 if (!(ndev->flags & IFF_MULTICAST)) { 1869 sh_eth_tsu_purge_mcast(ndev); 1870 mcast_all = 1; 1871 } 1872 if (ndev->flags & IFF_ALLMULTI) { 1873 sh_eth_tsu_purge_mcast(ndev); 1874 ecmr_bits &= ~ECMR_MCT; 1875 mcast_all = 1; 1876 } 1877 1878 if (ndev->flags & IFF_PROMISC) { 1879 sh_eth_tsu_purge_all(ndev); 1880 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM; 1881 } else if (mdp->cd->tsu) { 1882 struct netdev_hw_addr *ha; 1883 netdev_for_each_mc_addr(ha, ndev) { 1884 if (mcast_all && is_multicast_ether_addr(ha->addr)) 1885 continue; 1886 1887 if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) { 1888 if (!mcast_all) { 1889 sh_eth_tsu_purge_mcast(ndev); 1890 ecmr_bits &= ~ECMR_MCT; 1891 mcast_all = 1; 1892 } 1893 } 1894 } 1895 } else { 1896 /* Normal, unicast/broadcast-only mode. */ 1897 ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT; 1898 } 1899 1900 /* update the ethernet mode */ 1901 sh_eth_write(ndev, ecmr_bits, ECMR); 1902 1903 spin_unlock_irqrestore(&mdp->lock, flags); 1904 } 1905 1906 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp) 1907 { 1908 if (!mdp->port) 1909 return TSU_VTAG0; 1910 else 1911 return TSU_VTAG1; 1912 } 1913 1914 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid) 1915 { 1916 struct sh_eth_private *mdp = netdev_priv(ndev); 1917 int vtag_reg_index = sh_eth_get_vtag_index(mdp); 1918 1919 if (unlikely(!mdp->cd->tsu)) 1920 return -EPERM; 1921 1922 /* No filtering if vid = 0 */ 1923 if (!vid) 1924 return 0; 1925 1926 mdp->vlan_num_ids++; 1927 1928 /* 1929 * The controller has one VLAN tag HW filter. So, if the filter is 1930 * already enabled, the driver disables it and the filte 1931 */ 1932 if (mdp->vlan_num_ids > 1) { 1933 /* disable VLAN filter */ 1934 sh_eth_tsu_write(mdp, 0, vtag_reg_index); 1935 return 0; 1936 } 1937 1938 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK), 1939 vtag_reg_index); 1940 1941 return 0; 1942 } 1943 1944 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) 1945 { 1946 struct sh_eth_private *mdp = netdev_priv(ndev); 1947 int vtag_reg_index = sh_eth_get_vtag_index(mdp); 1948 1949 if (unlikely(!mdp->cd->tsu)) 1950 return -EPERM; 1951 1952 /* No filtering if vid = 0 */ 1953 if (!vid) 1954 return 0; 1955 1956 mdp->vlan_num_ids--; 1957 sh_eth_tsu_write(mdp, 0, vtag_reg_index); 1958 1959 return 0; 1960 } 1961 #endif /* SH_ETH_HAS_TSU */ 1962 1963 /* SuperH's TSU register init function */ 1964 static void sh_eth_tsu_init(struct sh_eth_private *mdp) 1965 { 1966 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ 1967 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ 1968 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ 1969 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0); 1970 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1); 1971 sh_eth_tsu_write(mdp, 0, TSU_PRISL0); 1972 sh_eth_tsu_write(mdp, 0, TSU_PRISL1); 1973 sh_eth_tsu_write(mdp, 0, TSU_FWSL0); 1974 sh_eth_tsu_write(mdp, 0, TSU_FWSL1); 1975 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); 1976 if (sh_eth_is_gether(mdp)) { 1977 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */ 1978 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */ 1979 } else { 1980 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ 1981 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ 1982 } 1983 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all 
interrupt status clear */ 1984 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ 1985 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ 1986 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ 1987 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ 1988 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ 1989 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ 1990 } 1991 1992 /* MDIO bus release function */ 1993 static int sh_mdio_release(struct net_device *ndev) 1994 { 1995 struct mii_bus *bus = dev_get_drvdata(&ndev->dev); 1996 1997 /* unregister mdio bus */ 1998 mdiobus_unregister(bus); 1999 2000 /* remove mdio bus info from net_device */ 2001 dev_set_drvdata(&ndev->dev, NULL); 2002 2003 /* free interrupts memory */ 2004 kfree(bus->irq); 2005 2006 /* free bitbang info */ 2007 free_mdio_bitbang(bus); 2008 2009 return 0; 2010 } 2011 2012 /* MDIO bus init function */ 2013 static int sh_mdio_init(struct net_device *ndev, int id, 2014 struct sh_eth_plat_data *pd) 2015 { 2016 int ret, i; 2017 struct bb_info *bitbang; 2018 struct sh_eth_private *mdp = netdev_priv(ndev); 2019 2020 /* create bit control struct for PHY */ 2021 bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); 2022 if (!bitbang) { 2023 ret = -ENOMEM; 2024 goto out; 2025 } 2026 2027 /* bitbang init */ 2028 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; 2029 bitbang->set_gate = pd->set_mdio_gate; 2030 bitbang->mdi_msk = 0x08; 2031 bitbang->mdo_msk = 0x04; 2032 bitbang->mmd_msk = 0x02;/* MMD */ 2033 bitbang->mdc_msk = 0x01; 2034 bitbang->ctrl.ops = &bb_ops; 2035 2036 /* MII controller setting */ 2037 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); 2038 if (!mdp->mii_bus) { 2039 ret = -ENOMEM; 2040 goto out_free_bitbang; 2041 } 2042 2043 /* Hook up MII support for ethtool */ 2044 mdp->mii_bus->name = "sh_mii"; 2045 mdp->mii_bus->parent = &ndev->dev; 2046 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2047 mdp->pdev->name, id); 2048 2049 /* PHY IRQ */ 2050 mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 2051 if (!mdp->mii_bus->irq) { 2052 ret = -ENOMEM; 2053 goto out_free_bus; 2054 } 2055 2056 for (i = 0; i < PHY_MAX_ADDR; i++) 2057 mdp->mii_bus->irq[i] = PHY_POLL; 2058 2059 /* regist mdio bus */ 2060 ret = mdiobus_register(mdp->mii_bus); 2061 if (ret) 2062 goto out_free_irq; 2063 2064 dev_set_drvdata(&ndev->dev, mdp->mii_bus); 2065 2066 return 0; 2067 2068 out_free_irq: 2069 kfree(mdp->mii_bus->irq); 2070 2071 out_free_bus: 2072 free_mdio_bitbang(mdp->mii_bus); 2073 2074 out_free_bitbang: 2075 kfree(bitbang); 2076 2077 out: 2078 return ret; 2079 } 2080 2081 static const u16 *sh_eth_get_register_offset(int register_type) 2082 { 2083 const u16 *reg_offset = NULL; 2084 2085 switch (register_type) { 2086 case SH_ETH_REG_GIGABIT: 2087 reg_offset = sh_eth_offset_gigabit; 2088 break; 2089 case SH_ETH_REG_FAST_SH4: 2090 reg_offset = sh_eth_offset_fast_sh4; 2091 break; 2092 case SH_ETH_REG_FAST_SH3_SH2: 2093 reg_offset = sh_eth_offset_fast_sh3_sh2; 2094 break; 2095 default: 2096 printk(KERN_ERR "Unknown register type (%d)\n", register_type); 2097 break; 2098 } 2099 2100 return reg_offset; 2101 } 2102 2103 static const struct net_device_ops sh_eth_netdev_ops = { 2104 .ndo_open = sh_eth_open, 2105 .ndo_stop = sh_eth_close, 2106 .ndo_start_xmit = sh_eth_start_xmit, 2107 .ndo_get_stats = sh_eth_get_stats, 2108 #if defined(SH_ETH_HAS_TSU) 2109 .ndo_set_rx_mode = sh_eth_set_multicast_list, 2110 .ndo_vlan_rx_add_vid = 
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
#endif
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	mdp->addr = ioremap(res->start, resource_size(res));
	if (mdp->addr == NULL) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "ioremap failed.\n");
		goto out_release;
	}

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;
	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);

	/* set cpu data */
#if defined(SH_ETH_HAS_BOTH_MODULES)
	mdp->cd = sh_eth_get_cpu_data(mdp);
#else
	mdp->cd = &sh_eth_my_cpu_data;
#endif
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
	mdp->post_rx = POST_RX >> (devno << 1);
	mdp->post_fw = POST_FW >> (devno << 1);

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;
		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!rtsu) {
			dev_err(&pdev->dev, "TSU resource not found\n");
			ret = -ENODEV;
			goto out_release;
		}
		mdp->tsu_addr = ioremap(rtsu->start,
					resource_size(rtsu));
		if (mdp->tsu_addr == NULL) {
			dev_err(&pdev->dev, "TSU ioremap failed.\n");
			ret = -ENOMEM;
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_FILTER;
	}

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_release;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_release:
	/* net_dev free */
	if (mdp && mdp->addr)
		iounmap(mdp->addr);
	if (mdp && mdp->tsu_addr)
		iounmap(mdp->tsu_addr);
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->cd->tsu)
		iounmap(mdp->tsu_addr);
	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	pm_runtime_disable(&pdev->dev);
	iounmap(mdp->addr);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static int sh_eth_runtime_nop(struct device *dev)
{
	/*
	 * Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		.name = CARDNAME,
		.pm = &sh_eth_dev_pm_ops,
	},
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");