/*
 * SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2013 Renesas Solutions Corp.
 * Copyright (C) 2013 Cogent Embedded, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR| \
		NETIF_MSG_TX_ERR)

static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[PSR]		= 0x0528,
	[PIPR]		= 0x052c,
	[RFLR]		= 0x0508,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]		= 0x05b0,
	[BCULR]		= 0x05b4,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[TROCR]		= 0x0700,
	[CDCR]		= 0x0708,
	[LCCR]		= 0x0710,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[CERCR]		= 0x0768,
	[CEECR]		= 0x0770,
	[MAFCR]		= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};
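/* Each of these sh_eth_offset_* tables maps the driver's common register
 * enum onto the register layout of one SoC family; the matching table is
 * presumably installed as mdp->reg_offset at probe time (sh_eth_is_gether()
 * below compares against sh_eth_offset_gigabit), so that sh_eth_read() and
 * sh_eth_write() can translate a generic register index into the right MMIO
 * offset.  The ARSTR/TSU_* entries are offsets within the separate TSU
 * block, accessed through sh_eth_tsu_read()/sh_eth_tsu_write().
 */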

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0300,
	[RFLR]		= 0x0308,
	[ECSR]		= 0x0310,
	[ECSIPR]	= 0x0318,
	[PIR]		= 0x0320,
	[PSR]		= 0x0328,
	[RDMLR]		= 0x0340,
	[IPGR]		= 0x0350,
	[APR]		= 0x0354,
	[MPR]		= 0x0358,
	[RFCF]		= 0x0360,
	[TPAUSER]	= 0x0364,
	[TPAUSECR]	= 0x0368,
	[MAHR]		= 0x03c0,
	[MALR]		= 0x03c8,
	[TROCR]		= 0x03d0,
	[CDCR]		= 0x03d4,
	[LCCR]		= 0x03d8,
	[CNDCR]		= 0x03dc,
	[CEFCR]		= 0x03e4,
	[FRECR]		= 0x03e8,
	[TSFRCR]	= 0x03ec,
	[TLFRCR]	= 0x03f0,
	[RFCR]		= 0x03f4,
	[MAFCR]		= 0x03f8,

	[EDMR]		= 0x0200,
	[EDTRR]		= 0x0208,
	[EDRRR]		= 0x0210,
	[TDLAR]		= 0x0218,
	[RDLAR]		= 0x0220,
	[EESR]		= 0x0228,
	[EESIPR]	= 0x0230,
	[TRSCER]	= 0x0238,
	[RMFCR]		= 0x0240,
	[TFTR]		= 0x0248,
	[FDR]		= 0x0250,
	[RMCR]		= 0x0258,
	[TFUCR]		= 0x0264,
	[RFOCR]		= 0x0268,
	[FCFTR]		= 0x0270,
	[TRIMD]		= 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0100,
	[RFLR]		= 0x0108,
	[ECSR]		= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]		= 0x0120,
	[PSR]		= 0x0128,
	[RDMLR]		= 0x0140,
	[IPGR]		= 0x0150,
	[APR]		= 0x0154,
	[MPR]		= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]		= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]		= 0x016c,
	[MAHR]		= 0x01c0,
	[MALR]		= 0x01c8,
	[TROCR]		= 0x01d0,
	[CDCR]		= 0x01d4,
	[LCCR]		= 0x01d8,
	[CNDCR]		= 0x01dc,
	[CEFCR]		= 0x01e4,
	[FRECR]		= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]		= 0x01f4,
	[MAFCR]		= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0008,
	[EDRRR]		= 0x0010,
	[TDLAR]		= 0x0018,
	[RDLAR]		= 0x0020,
	[EESR]		= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]		= 0x0040,
	[TFTR]		= 0x0048,
	[FDR]		= 0x0050,
	[RMCR]		= 0x0058,
	[TFUCR]		= 0x0064,
	[RFOCR]		= 0x0068,
	[FCFTR]		= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]		= 0x007c,
	[RBWAR]		= 0x00c8,
	[RDFAR]		= 0x00cc,
	[TBRAR]		= 0x00d4,
	[TDFAR]		= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0160,
	[ECSR]		= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]		= 0x016c,
	[MAHR]		= 0x0170,
	[MALR]		= 0x0174,
	[RFLR]		= 0x0178,
	[PSR]		= 0x017c,
	[TROCR]		= 0x0180,
	[CDCR]		= 0x0184,
	[LCCR]		= 0x0188,
	[CNDCR]		= 0x018c,
	[CEFCR]		= 0x0194,
	[FRECR]		= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]		= 0x01a4,
	[MAFCR]		= 0x01a8,
	[IPGR]		= 0x01b4,
	[APR]		= 0x01b8,
	[MPR]		= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]		= 0x01cc,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRL31]	= 0x01fc,
};

#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
	defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_ARCH_R8A7740)
static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		pr_warn("PHY interface mode was not set up; defaulting to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}
#endif

/* There is CPU dependent code */
#if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
	default:
		break;
	}
}

/* R8A7778/9 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES	1
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value	= 0x00000001,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rpadir_value   = 2 << 16,
};

#define SH_GIGA_ETH_BASE	0xfee00000
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}
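/* Note on sh_eth_chip_reset_giga() above: writing ARSTR_ARSTR resets the
 * whole GETHER block shared by both ports at SH_GIGA_ETH_BASE, which
 * appears to clear the MAC address registers as well.  Since the MAC
 * address is normally programmed only once by the bootloader (see
 * read_mac_address() below), MAHR/MALR of both ports are saved before the
 * reset and written back afterwards.
 */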

static int sh_eth_is_gether(struct sh_eth_private *mdp);
static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp)) {
		sh_eth_write(ndev, 0x03, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
				EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			goto out;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
				EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
				EDMR);
	}

out:
	return ret;
}

static void sh_eth_set_duplex_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex_giga,
	.set_rate	= sh_eth_set_rate_giga,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,
	.fdr_value	= 0x0000072f,
	.rmcr_value	= 0x00000001,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value   = 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
};

static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return &sh_eth_my_cpu_data_giga;
	else
		return &sh_eth_my_cpu_data;
}

#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);
static void sh_eth_reset_hw_crc(struct net_device *ndev);

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* sh7763 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
#if defined(CONFIG_CPU_SUBTYPE_SH7734)
	.hw_crc     = 1,
	.select_mii = 1,
#endif
};

static int sh_eth_reset(struct net_device *ndev)
{
	int ret = 0;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);

	ret = sh_eth_check_reset(ndev);
	if (ret)
		goto out;

	/* Table Init */
	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);

	/* Reset HW CRC register */
	sh_eth_reset_hw_crc(ndev);

	/* Select MII mode */
	if (sh_eth_my_cpu_data.select_mii)
		sh_eth_select_mii(ndev);
out:
	return ret;
}

static void sh_eth_reset_hw_crc(struct net_device *ndev)
{
	if (sh_eth_my_cpu_data.hw_crc)
		sh_eth_write(ndev, 0x0, CSMR);
}

#elif defined(CONFIG_ARCH_R8A7740)
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);

	sh_eth_select_mii(ndev);
}

static int sh_eth_reset(struct net_device *ndev)
{
	int ret = 0;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);

	ret = sh_eth_check_reset(ndev);
	if (ret)
		goto out;

	/* Table Init */
	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);

out:
	return ret;
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* R8A7740 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.select_mii	= 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT	1
#define SH_ETH_HAS_TSU	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu		= 1,
};
#endif

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->tx_error_check)
		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}

#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static int  sh_eth_reset(struct net_device *ndev)
{
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
	mdelay(3);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);

	return 0;
}
#else
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		pr_err("Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}
#endif

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif


/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}

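/* Note on the two helpers above: the E-DMAC fetches its descriptors
 * directly from memory, and on some platforms its byte order differs from
 * the CPU's, so every descriptor word the driver writes goes through
 * cpu_to_edmac() and every word it reads back goes through edmac_to_cpu(),
 * keyed off the per-platform mdp->edmac_endian setting (for example,
 * rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP) further below).
 */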
/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/*
 * Get MAC address from SuperH MAC address register
 *
 * The SuperH Ethernet controller has no ROM for the MAC address.
 * This driver reads the MAC address that was programmed by the bootloader
 * (U-Boot or sh-ipl+g); to use this device, the MAC address must be set in
 * the bootloader.
 *
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, 6);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

static int sh_eth_is_gether(struct sh_eth_private *mdp)
{
	if (mdp->reg_offset == sh_eth_offset_gigabit)
		return 1;
	else
		return 0;
}

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;	/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}
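/* The four callbacks above (sh_mdc_ctrl, sh_mmd_ctrl, sh_set_mdio and
 * sh_get_mdio) are the hooks the generic mdio-bitbang framework drives via
 * bb_ops below: it toggles MDC and shifts MDIO data one bit at a time
 * through them.  bitbang->addr is expected to point at the PIR (PHY
 * interface) register, with the *_msk fields selecting the MDC, MDO, MDI
 * and direction bits inside it; the optional set_gate() hook lets a
 * platform switch a bus gate before each access.
 */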

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
				DMA_FROM_DEVICE);
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The size of the buffer is a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

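/* Summary of the ring conventions set up above: an Rx descriptor is handed
 * to the hardware by setting RD_RACT | RD_RFP in its status word, and the
 * last descriptor also carries RD_RDEL so the E-DMAC wraps back to the head
 * of the ring; the Tx ring works the same way with TD_TACT/TD_TFP/TD_TDLE.
 * The base address of each ring is programmed into RDLAR/TDLAR (plus
 * RDFAR/TDFAR on GETHER parts) while descriptor 0 is being initialised.
 */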
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}

static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}

static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		goto out;

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	if (start)
		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	if (start) {
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);

		netif_start_queue(ndev);
	}

out:
	return ret;
}

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int freeNum = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			freeNum++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += txdesc->buffer_length;
	}
	return freeNum;
}

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

#if defined(CONFIG_ARCH_R8A7740)
		desc_status >>= 16;
#endif

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
					DMA_FROM_DEVICE);
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE)
			mdp->cur_rx = mdp->dirty_rx =
				(sh_eth_read(ndev, RDFAR) -
				 sh_eth_read(ndev, RDLAR)) >> 4;
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	return 0;
}

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		(ECMR_RE | ECMR_TE), ECMR);
}

/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				goto ignore_link;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK))
				sh_eth_rcv_snd_disable(ndev);
			else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					  ~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					  ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					  DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

ignore_link:
	if (intr_status & EESR_TWB) {
		/* Write-back end, unused write back interrupt */
		if (intr_status & EESR_TABT)	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			if (netif_msg_tx_err(mdp))
				dev_err(&ndev->dev, "Transmit Abort\n");
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;

		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
				intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
				mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	unsigned long intr_status;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing ECI interrupt to be always
	 * enabled since it's the one that comes thru regardless of the mask,
	 * and we need to fully handle it in sh_eth_error() in order to quench
	 * it as it doesn't get cleared by just writing 1 to the ECI bit...
	 */
	intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;
	/* Clear interrupt */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			cd->tx_check | cd->eesr_err_check)) {
		sh_eth_write(ndev, intr_status, EESR);
		ret = IRQ_HANDLED;
	} else
		goto other_irq;

	if (intr_status & (EESR_FRC | /* Frame recv*/
			EESR_RMAF | /* Multi cast address recv*/
			EESR_RRF  | /* Bit frame recv */
			EESR_RTLF | /* Long frame recv*/
			EESR_RTSF | /* short frame recv */
			EESR_PRE  | /* PHY-LSI recv error */
			EESR_CERF)){ /* recv frame CRC error */
		sh_eth_rx(ndev, intr_status);
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}

/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (!mdp->link) {
			sh_eth_write(ndev,
				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
			new_state = 1;
			mdp->link = phydev->link;
			if (mdp->cd->no_psr || mdp->no_ether_link)
				sh_eth_rcv_snd_enable(ndev);
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = 0;
		mdp->speed = 0;
		mdp->duplex = -1;
		if (mdp->cd->no_psr || mdp->no_ether_link)
			sh_eth_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}

/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		mdp->mii_bus->id, mdp->phy_id);

	mdp->link = 0;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
			     mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		 phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}
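/* The ethtool get/set_settings handlers below defer to phylib
 * (phy_ethtool_gset()/phy_ethtool_sset()) under mdp->lock; set_settings
 * additionally parks the MAC (RX/TX disabled via ECMR) around the PHY
 * reconfiguration and re-applies the duplex setting through
 * mdp->cd->set_duplex() once the PHY has been updated.
 */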

static int sh_eth_get_settings(struct net_device *ndev,
			struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_set_settings(struct net_device *ndev,
		struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
			struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
					sizeof(sh_eth_gstrings_stats));
		break;
	}
}

static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}

static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_tx_disable(ndev);
		/* Disable interrupts by clearing the interrupt mask. */
		sh_eth_write(ndev, 0x0000, EESIPR);
		/* Stop the chip's Tx and Rx processes. */
		sh_eth_write(ndev, 0, EDTRR);
		sh_eth_write(ndev, 0, EDRRR);
		synchronize_irq(ndev->irq);
	}

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);
	/* Free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	ret = sh_eth_ring_init(ndev);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
		return ret;
	}
	ret = sh_eth_dev_init(ndev, false);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
		return ret;
	}

	if (netif_running(ndev)) {
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_wake_queue(ndev);
	}

	return 0;
}

static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings	= sh_eth_get_settings,
	.set_settings	= sh_eth_set_settings,
	.nway_reset	= sh_eth_nway_reset,
	.get_msglevel	= sh_eth_get_msglevel,
	.set_msglevel	= sh_eth_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_strings	= sh_eth_get_strings,
	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
	.get_sset_count     = sh_eth_get_sset_count,
	.get_ringparam	= sh_eth_get_ringparam,
	.set_ringparam	= sh_eth_set_ringparam,
};

/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
	defined(CONFIG_CPU_SUBTYPE_SH7757)
				IRQF_SHARED,
#else
				0,
#endif
				ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Can not assign IRQ number\n");
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev, true);
	if (ret)
		goto out_free_irq;

	/* PHY control start*/
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}

/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	if (netif_msg_timer(mdp))
		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
	       " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev, true);
}

/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}

/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	sh_eth_ring_free(ndev);

	/* Free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}

static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &ndev->stats;
}

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
			   int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

#if defined(SH_ETH_HAS_TSU)
/* For TSU_POSTn. Please refer to the manual for these (strange) bitfields. */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
					    int entry)
{
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}

static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}

static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	tmp = ioread32(reg_offset);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 post_mask, ref_mask, tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = ioread32(reg_offset);
	iowrite32(tmp & ~post_mask, reg_offset);

	/* Return "true" if the other port still has the entry enabled */
	return tmp & ref_mask;
}

static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			dev_err(&ndev->dev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

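/*
 * Layout of the TSU CAM address table used by the helpers below: each of
 * the SH_ETH_TSU_CAM_ENTRIES entries occupies 8 bytes starting at
 * TSU_ADRH0, with the high register holding the first four octets of the
 * MAC address and the low register (offset +4) the remaining two.  Every
 * table write must wait for the ADSBSY bit to clear, which is what
 * sh_eth_tsu_busy() polls for.
 */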
static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
{
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}

static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
	u32 val;

	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}

static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (memcmp(addr, c_addr, ETH_ALEN) == 0)
			return i;
	}

	return -ENOENT;
}

static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
}

static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int ret;
	u8 blank[ETH_ALEN];

	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
	if (ret < 0)
		return ret;
	return 0;
}

static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0) {
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		if (i < 0)
			return -ENOMEM;
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
		if (ret < 0)
			return ret;

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);
	}

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);

	return 0;
}

static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i >= 0) {
		/* Entry found */
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			goto done;

		/* Disable the entry if both ports have disabled it */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}
done:
	return 0;
}

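/*
 * The CAM table is shared by the two ports of the controller.  Removing an
 * address only clears this port's TSU_POSTn reference; the entry itself is
 * wiped from the table once neither port references it any longer.
 */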
static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (unlikely(!mdp->cd->tsu))
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports have disabled it */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (unlikely(!mdp->cd->tsu))
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}

/* Set the multicast reception mode */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/*
	 * Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		mcast_all = 1;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;
		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}

static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	if (!mdp->port)
		return TSU_VTAG0;
	else
		return TSU_VTAG1;
}

static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
				  __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/*
	 * The controller has one VLAN tag HW filter. So, if the filter is
	 * already enabled, the driver disables it and accepts every VLAN
	 * tag instead, leaving the filtering to the software stack.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}

static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
				   __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}
#endif /* SH_ETH_HAS_TSU */

/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}

/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}

/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
			       GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = PIR_MDI;
	bitbang->mdo_msk = PIR_MDO;
	bitbang->mmd_msk = PIR_MMD;
	bitbang->mdc_msk = PIR_MDC;
	bitbang->ctrl.ops = &bb_ops;

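	/*
	 * From here on the generic mdio-bitbang layer does the work:
	 * alloc_mdio_bitbang() wraps bitbang->ctrl in a struct mii_bus whose
	 * read/write methods toggle the MDC/MDIO bits of the PIR register
	 * through the bb_ops callbacks prepared above.
	 */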
	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 mdp->pdev->name, id);

	/* PHY IRQ */
	mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
					 sizeof(int) * PHY_MAX_ADDR,
					 GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_bus;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out:
	return ret;
}

static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_RCAR:
		reg_offset = sh_eth_offset_fast_rcar;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		pr_err("Unknown register type (%d)\n", register_type);
		break;
	}

	return reg_offset;
}

static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open = sh_eth_open,
	.ndo_stop = sh_eth_close,
	.ndo_start_xmit = sh_eth_start_xmit,
	.ndo_get_stats = sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
	.ndo_set_rx_mode = sh_eth_set_multicast_list,
	.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
#endif
	.ndo_tx_timeout = sh_eth_tx_timeout,
	.ndo_do_ioctl = sh_eth_do_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_change_mtu = eth_change_mtu,
};

static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd = pdev->dev.platform_data;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;
	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);

	/* set cpu data */
#if defined(SH_ETH_HAS_BOTH_MODULES)
	mdp->cd = sh_eth_get_cpu_data(mdp);
#else
	mdp->cd = &sh_eth_my_cpu_data;
#endif
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;
		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
		if (IS_ERR(mdp->tsu_addr)) {
			ret = PTR_ERR(mdp->tsu_addr);
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_release;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static int sh_eth_runtime_nop(struct device *dev)
{
	/*
	 * Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		.name = CARDNAME,
		.pm = &sh_eth_dev_pm_ops,
	},
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");
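
/*
 * Usage note (illustrative sketch, not part of the driver): the probe
 * routine above expects a platform device whose name is the literal string
 * that CARDNAME expands to in sh_eth.h, carrying the controller's register
 * window (plus the TSU window as a second memory resource where
 * applicable), an interrupt, and a struct sh_eth_plat_data as
 * platform_data.  A board file would register it roughly as follows; the
 * address, size, IRQ number and PHY settings are placeholders, not values
 * for any particular SoC, and the endian/register-type constants are those
 * declared alongside struct sh_eth_plat_data in <linux/sh_eth.h>:
 *
 *	static struct sh_eth_plat_data my_sh_eth_pdata = {
 *		.phy		= 0x01,
 *		.phy_interface	= PHY_INTERFACE_MODE_MII,
 *		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 *		.register_type	= SH_ETH_REG_FAST_SH4,
 *	};
 *
 *	static struct resource my_sh_eth_resources[] = {
 *		DEFINE_RES_MEM(0xfee00000, 0x200),
 *		DEFINE_RES_IRQ(57),
 *	};
 *
 *	static struct platform_device my_sh_eth_device = {
 *		.name		= CARDNAME,
 *		.id		= 0,
 *		.resource	= my_sh_eth_resources,
 *		.num_resources	= ARRAY_SIZE(my_sh_eth_resources),
 *		.dev = {
 *			.platform_data	= &my_sh_eth_pdata,
 *		},
 *	};
 */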