/*
 * SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2013 Renesas Solutions Corp.
 * Copyright (C) 2013 Cogent Embedded, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR| \
		NETIF_MSG_TX_ERR)

static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDSR] = 0x0000,
	[EDMR] = 0x0400,
	[EDTRR] = 0x0408,
	[EDRRR] = 0x0410,
	[EESR] = 0x0428,
	[EESIPR] = 0x0430,
	[TDLAR] = 0x0010,
	[TDFAR] = 0x0014,
	[TDFXR] = 0x0018,
	[TDFFR] = 0x001c,
	[RDLAR] = 0x0030,
	[RDFAR] = 0x0034,
	[RDFXR] = 0x0038,
	[RDFFR] = 0x003c,
	[TRSCER] = 0x0438,
	[RMFCR] = 0x0440,
	[TFTR] = 0x0448,
	[FDR] = 0x0450,
	[RMCR] = 0x0458,
	[RPADIR] = 0x0460,
	[FCFTR] = 0x0468,
	[CSMR] = 0x04E4,

	[ECMR] = 0x0500,
	[ECSR] = 0x0510,
	[ECSIPR] = 0x0518,
	[PIR] = 0x0520,
	[PSR] = 0x0528,
	[PIPR] = 0x052c,
	[RFLR] = 0x0508,
	[APR] = 0x0554,
	[MPR] = 0x0558,
	[PFTCR] = 0x055c,
	[PFRCR] = 0x0560,
	[TPAUSER] = 0x0564,
	[GECMR] = 0x05b0,
	[BCULR] = 0x05b4,
	[MAHR] = 0x05c0,
	[MALR] = 0x05c8,
	[TROCR] = 0x0700,
	[CDCR] = 0x0708,
	[LCCR] = 0x0710,
	[CEFCR] = 0x0740,
	[FRECR] = 0x0748,
	[TSFRCR] = 0x0750,
	[TLFRCR] = 0x0758,
	[RFCR] = 0x0760,
	[CERCR] = 0x0768,
	[CEECR] = 0x0770,
	[MAFCR] = 0x0778,
	[RMII_MII] = 0x0790,

	[ARSTR] = 0x0000,
	[TSU_CTRST] = 0x0004,
	[TSU_FWEN0] = 0x0010,
	[TSU_FWEN1] = 0x0014,
	[TSU_FCM] = 0x0018,
	[TSU_BSYSL0] = 0x0020,
	[TSU_BSYSL1] = 0x0024,
	[TSU_PRISL0] = 0x0028,
	[TSU_PRISL1] = 0x002c,
	[TSU_FWSL0] = 0x0030,
	[TSU_FWSL1] = 0x0034,
	[TSU_FWSLC] = 0x0038,
	[TSU_QTAG0] = 0x0040,
	[TSU_QTAG1] = 0x0044,
	[TSU_FWSR] = 0x0050,
	[TSU_FWINMK] = 0x0054,
	[TSU_ADQT0] = 0x0048,
	[TSU_ADQT1] = 0x004c,
	[TSU_VTAG0] = 0x0058,
	[TSU_VTAG1] = 0x005c,
	[TSU_ADSBSY] = 0x0060,
	[TSU_TEN] = 0x0064,
	[TSU_POST1] = 0x0070,
	[TSU_POST2] = 0x0074,
	[TSU_POST3] = 0x0078,
	[TSU_POST4] = 0x007c,
	[TSU_ADRH0] = 0x0100,
	[TSU_ADRL0] = 0x0104,
	[TSU_ADRH31] = 0x01f8,
	[TSU_ADRL31] = 0x01fc,

	[TXNLCR0] = 0x0080,
	[TXALCR0] = 0x0084,
	[RXNLCR0] = 0x0088,
	[RXALCR0] = 0x008c,
	[FWNLCR0] = 0x0090,
	[FWALCR0] = 0x0094,
	[TXNLCR1] = 0x00a0,
	[TXALCR1] = 0x00a0,
	[RXNLCR1] = 0x00a8,
	[RXALCR1] = 0x00ac,
	[FWNLCR1] = 0x00b0,
	[FWALCR1] = 0x00b4,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR] = 0x0300,
	[RFLR] = 0x0308,
	[ECSR] = 0x0310,
	[ECSIPR] = 0x0318,
	[PIR] = 0x0320,
	[PSR] = 0x0328,
	[RDMLR] = 0x0340,
	[IPGR] = 0x0350,
	[APR] = 0x0354,
	[MPR] = 0x0358,
	[RFCF] = 0x0360,
	[TPAUSER] = 0x0364,
	[TPAUSECR] = 0x0368,
	[MAHR] = 0x03c0,
	[MALR] = 0x03c8,
	[TROCR] = 0x03d0,
	[CDCR] = 0x03d4,
	[LCCR] = 0x03d8,
	[CNDCR] = 0x03dc,
	[CEFCR] = 0x03e4,
	[FRECR] = 0x03e8,
	[TSFRCR] = 0x03ec,
	[TLFRCR] = 0x03f0,
	[RFCR] = 0x03f4,
	[MAFCR] = 0x03f8,

	[EDMR] = 0x0200,
	[EDTRR] = 0x0208,
	[EDRRR] = 0x0210,
	[TDLAR] = 0x0218,
	[RDLAR] = 0x0220,
	[EESR] = 0x0228,
	[EESIPR] = 0x0230,
	[TRSCER] = 0x0238,
	[RMFCR] = 0x0240,
	[TFTR] = 0x0248,
	[FDR] = 0x0250,
	[RMCR] = 0x0258,
	[TFUCR] = 0x0264,
	[RFOCR] = 0x0268,
	[FCFTR] = 0x0270,
	[TRIMD] = 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR] = 0x0100,
	[RFLR] = 0x0108,
	[ECSR] = 0x0110,
	[ECSIPR] = 0x0118,
	[PIR] = 0x0120,
	[PSR] = 0x0128,
	[RDMLR] = 0x0140,
	[IPGR] = 0x0150,
	[APR] = 0x0154,
	[MPR] = 0x0158,
	[TPAUSER] = 0x0164,
	[RFCF] = 0x0160,
	[TPAUSECR] = 0x0168,
	[BCFRR] = 0x016c,
	[MAHR] = 0x01c0,
	[MALR] = 0x01c8,
	[TROCR] = 0x01d0,
	[CDCR] = 0x01d4,
	[LCCR] = 0x01d8,
	[CNDCR] = 0x01dc,
	[CEFCR] = 0x01e4,
	[FRECR] = 0x01e8,
	[TSFRCR] = 0x01ec,
	[TLFRCR] = 0x01f0,
	[RFCR] = 0x01f4,
	[MAFCR] = 0x01f8,
	[RTRATE] = 0x01fc,

	[EDMR] = 0x0000,
	[EDTRR] = 0x0008,
	[EDRRR] = 0x0010,
	[TDLAR] = 0x0018,
	[RDLAR] = 0x0020,
	[EESR] = 0x0028,
	[EESIPR] = 0x0030,
	[TRSCER] = 0x0038,
	[RMFCR] = 0x0040,
	[TFTR] = 0x0048,
	[FDR] = 0x0050,
	[RMCR] = 0x0058,
	[TFUCR] = 0x0064,
	[RFOCR] = 0x0068,
	[FCFTR] = 0x0070,
	[RPADIR] = 0x0078,
	[TRIMD] = 0x007c,
	[RBWAR] = 0x00c8,
	[RDFAR] = 0x00cc,
	[TBRAR] = 0x00d4,
	[TDFAR] = 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR] = 0x0160,
	[ECSR] = 0x0164,
	[ECSIPR] = 0x0168,
	[PIR] = 0x016c,
	[MAHR] = 0x0170,
	[MALR] = 0x0174,
	[RFLR] = 0x0178,
	[PSR] = 0x017c,
	[TROCR] = 0x0180,
	[CDCR] = 0x0184,
	[LCCR] = 0x0188,
	[CNDCR] = 0x018c,
	[CEFCR] = 0x0194,
	[FRECR] = 0x0198,
	[TSFRCR] = 0x019c,
	[TLFRCR] = 0x01a0,
	[RFCR] = 0x01a4,
	[MAFCR] = 0x01a8,
	[IPGR] = 0x01b4,
	[APR] = 0x01b8,
	[MPR] = 0x01bc,
	[TPAUSER] = 0x01c4,
	[BCFR] = 0x01cc,

	[ARSTR] = 0x0000,
	[TSU_CTRST] = 0x0004,
	[TSU_FWEN0] = 0x0010,
	[TSU_FWEN1] = 0x0014,
	[TSU_FCM] = 0x0018,
	[TSU_BSYSL0] = 0x0020,
	[TSU_BSYSL1] = 0x0024,
	[TSU_PRISL0] = 0x0028,
	[TSU_PRISL1] = 0x002c,
	[TSU_FWSL0] = 0x0030,
	[TSU_FWSL1] = 0x0034,
	[TSU_FWSLC] = 0x0038,
	[TSU_QTAGM0] = 0x0040,
	[TSU_QTAGM1] = 0x0044,
	[TSU_ADQT0] = 0x0048,
	[TSU_ADQT1] = 0x004c,
	[TSU_FWSR] = 0x0050,
	[TSU_FWINMK] = 0x0054,
	[TSU_ADSBSY] = 0x0060,
	[TSU_TEN] = 0x0064,
	[TSU_POST1] = 0x0070,
	[TSU_POST2] = 0x0074,
	[TSU_POST3] = 0x0078,
	[TSU_POST4] = 0x007c,

	[TXNLCR0] = 0x0080,
	[TXALCR0] = 0x0084,
	[RXNLCR0] = 0x0088,
	[RXALCR0] = 0x008c,
	[FWNLCR0] = 0x0090,
	[FWALCR0] = 0x0094,
	[TXNLCR1] = 0x00a0,
	[TXALCR1] = 0x00a0,
	[RXNLCR1] = 0x00a8,
	[RXALCR1] = 0x00ac,
	[FWNLCR1] = 0x00b0,
	[FWALCR1] = 0x00b4,

	[TSU_ADRH0] = 0x0100,
	[TSU_ADRL0] = 0x0104,
	[TSU_ADRL31] = 0x01fc,
};

#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
	defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_ARCH_R8A7740)
static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		pr_warn("PHY interface mode was not setup. Set to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}
#endif

/* There is CPU dependent code */
#if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
	default:
		break;
	}
}

/* R8A7778/9 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate,

	.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value = 0x01ff009f,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate,

	.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
	.rpadir = 1,
	.rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES	1
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate,

	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value = 0x00000001,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
	.no_ade = 1,
	.rpadir = 1,
	.rpadir_value = 2 << 16,
};

#define SH_GIGA_ETH_BASE	0xfee00000
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static int sh_eth_is_gether(struct sh_eth_private *mdp);
static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp)) {
		sh_eth_write(ndev, 0x03, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
			     EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			goto out;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
			     EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
			     EDMR);
	}

out:
	return ret;
}

static void sh_eth_set_duplex_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
	.chip_reset = sh_eth_chip_reset_giga,
	.set_duplex = sh_eth_set_duplex_giga,
	.set_rate = sh_eth_set_rate_giga,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
			  EESR_TFE,
	.fdr_value = 0x0000072f,
	.rmcr_value = 0x00000001,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.bculr = 1,
	.hw_swap = 1,
	.rpadir = 1,
	.rpadir_value = 2 << 16,
	.no_trimd = 1,
	.no_ade = 1,
	.tsu = 1,
};

static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return &sh_eth_my_cpu_data_giga;
	else
		return &sh_eth_my_cpu_data;
}

#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);
static void sh_eth_reset_hw_crc(struct net_device *ndev);

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* sh7763 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset = sh_eth_chip_reset,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
			  EESR_TFE,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.bculr = 1,
	.hw_swap = 1,
	.no_trimd = 1,
	.no_ade = 1,
	.tsu = 1,
#if defined(CONFIG_CPU_SUBTYPE_SH7734)
	.hw_crc = 1,
	.select_mii = 1,
#endif
};

static int sh_eth_reset(struct net_device *ndev)
{
	int ret = 0;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);

	ret = sh_eth_check_reset(ndev);
	if (ret)
		goto out;

	/* Table Init */
	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);

	/* Reset HW CRC register */
	sh_eth_reset_hw_crc(ndev);

	/* Select MII mode */
	if (sh_eth_my_cpu_data.select_mii)
		sh_eth_select_mii(ndev);
out:
	return ret;
}

static void sh_eth_reset_hw_crc(struct net_device *ndev)
{
	if (sh_eth_my_cpu_data.hw_crc)
		sh_eth_write(ndev, 0x0, CSMR);
}

#elif defined(CONFIG_ARCH_R8A7740)
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);

	sh_eth_select_mii(ndev);
}

static int sh_eth_reset(struct net_device *ndev)
{
	int ret = 0;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);

	ret = sh_eth_check_reset(ndev);
	if (ret)
		goto out;

	/* Table Init */
	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);

out:
	return ret;
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* R8A7740 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset = sh_eth_chip_reset,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
			  EESR_TFE,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.bculr = 1,
	.hw_swap = 1,
	.no_trimd = 1,
	.no_ade = 1,
	.tsu = 1,
	.select_mii = 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT	1
#define SH_ETH_HAS_TSU	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu = 1,
};
#endif

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->tx_error_check)
		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}

#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static int sh_eth_reset(struct net_device *ndev)
{
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
	mdelay(3);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);

	return 0;
}
#else
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		pr_err("Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}
#endif

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif


/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}

/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/*
 * Get the MAC address from the SuperH MAC address registers.
 *
 * The SuperH Ethernet controller has no ROM for the MAC address.
 * This driver reuses the MAC address programmed by the bootloader
 * (U-Boot or sh-ipl+g), so the bootloader must set a MAC address
 * before this device is used.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, 6);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

static int sh_eth_is_gether(struct sh_eth_private *mdp)
{
	if (mdp->reg_offset == sh_eth_offset_gigabit)
		return 1;
	else
		return 0;
}

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;	/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
			       DMA_FROM_DEVICE);
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The buffer size is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}

static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}

static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		goto out;

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ?
ECMR_DM : 0) | ECMR_TE | ECMR_RE; 1324 1325 sh_eth_write(ndev, val, ECMR); 1326 1327 if (mdp->cd->set_rate) 1328 mdp->cd->set_rate(ndev); 1329 1330 /* E-MAC Status Register clear */ 1331 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); 1332 1333 /* E-MAC Interrupt Enable register */ 1334 if (start) 1335 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); 1336 1337 /* Set MAC address */ 1338 update_mac_address(ndev); 1339 1340 /* mask reset */ 1341 if (mdp->cd->apr) 1342 sh_eth_write(ndev, APR_AP, APR); 1343 if (mdp->cd->mpr) 1344 sh_eth_write(ndev, MPR_MP, MPR); 1345 if (mdp->cd->tpauser) 1346 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); 1347 1348 if (start) { 1349 /* Setting the Rx mode will start the Rx process. */ 1350 sh_eth_write(ndev, EDRRR_R, EDRRR); 1351 1352 netif_start_queue(ndev); 1353 } 1354 1355 out: 1356 return ret; 1357 } 1358 1359 /* free Tx skb function */ 1360 static int sh_eth_txfree(struct net_device *ndev) 1361 { 1362 struct sh_eth_private *mdp = netdev_priv(ndev); 1363 struct sh_eth_txdesc *txdesc; 1364 int freeNum = 0; 1365 int entry = 0; 1366 1367 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 1368 entry = mdp->dirty_tx % mdp->num_tx_ring; 1369 txdesc = &mdp->tx_ring[entry]; 1370 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 1371 break; 1372 /* Free the original skb. */ 1373 if (mdp->tx_skbuff[entry]) { 1374 dma_unmap_single(&ndev->dev, txdesc->addr, 1375 txdesc->buffer_length, DMA_TO_DEVICE); 1376 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1377 mdp->tx_skbuff[entry] = NULL; 1378 freeNum++; 1379 } 1380 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 1381 if (entry >= mdp->num_tx_ring - 1) 1382 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 1383 1384 ndev->stats.tx_packets++; 1385 ndev->stats.tx_bytes += txdesc->buffer_length; 1386 } 1387 return freeNum; 1388 } 1389 1390 /* Packet receive function */ 1391 static int sh_eth_rx(struct net_device *ndev, u32 intr_status) 1392 { 1393 struct sh_eth_private *mdp = netdev_priv(ndev); 1394 struct sh_eth_rxdesc *rxdesc; 1395 1396 int entry = mdp->cur_rx % mdp->num_rx_ring; 1397 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; 1398 struct sk_buff *skb; 1399 u16 pkt_len = 0; 1400 u32 desc_status; 1401 1402 rxdesc = &mdp->rx_ring[entry]; 1403 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 1404 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1405 pkt_len = rxdesc->frame_length; 1406 1407 if (--boguscnt < 0) 1408 break; 1409 1410 if (!(desc_status & RDFEND)) 1411 ndev->stats.rx_length_errors++; 1412 1413 #if defined(CONFIG_ARCH_R8A7740) 1414 /* 1415 * In case of almost all GETHER/ETHERs, the Receive Frame State 1416 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to 1417 * bit 0. However, in case of the R8A7740's GETHER, the RFS 1418 * bits are from bit 25 to bit 16. So, the driver needs right 1419 * shifting by 16. 
1420 */ 1421 desc_status >>= 16; 1422 #endif 1423 1424 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1425 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 1426 ndev->stats.rx_errors++; 1427 if (desc_status & RD_RFS1) 1428 ndev->stats.rx_crc_errors++; 1429 if (desc_status & RD_RFS2) 1430 ndev->stats.rx_frame_errors++; 1431 if (desc_status & RD_RFS3) 1432 ndev->stats.rx_length_errors++; 1433 if (desc_status & RD_RFS4) 1434 ndev->stats.rx_length_errors++; 1435 if (desc_status & RD_RFS6) 1436 ndev->stats.rx_missed_errors++; 1437 if (desc_status & RD_RFS10) 1438 ndev->stats.rx_over_errors++; 1439 } else { 1440 if (!mdp->cd->hw_swap) 1441 sh_eth_soft_swap( 1442 phys_to_virt(ALIGN(rxdesc->addr, 4)), 1443 pkt_len + 2); 1444 skb = mdp->rx_skbuff[entry]; 1445 mdp->rx_skbuff[entry] = NULL; 1446 if (mdp->cd->rpadir) 1447 skb_reserve(skb, NET_IP_ALIGN); 1448 skb_put(skb, pkt_len); 1449 skb->protocol = eth_type_trans(skb, ndev); 1450 netif_rx(skb); 1451 ndev->stats.rx_packets++; 1452 ndev->stats.rx_bytes += pkt_len; 1453 } 1454 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT); 1455 entry = (++mdp->cur_rx) % mdp->num_rx_ring; 1456 rxdesc = &mdp->rx_ring[entry]; 1457 } 1458 1459 /* Refill the Rx ring buffers. */ 1460 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { 1461 entry = mdp->dirty_rx % mdp->num_rx_ring; 1462 rxdesc = &mdp->rx_ring[entry]; 1463 /* The size of the buffer is 16 byte boundary. */ 1464 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1465 1466 if (mdp->rx_skbuff[entry] == NULL) { 1467 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 1468 mdp->rx_skbuff[entry] = skb; 1469 if (skb == NULL) 1470 break; /* Better luck next round. */ 1471 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, 1472 DMA_FROM_DEVICE); 1473 sh_eth_set_receive_align(skb); 1474 1475 skb_checksum_none_assert(skb); 1476 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 1477 } 1478 if (entry >= mdp->num_rx_ring - 1) 1479 rxdesc->status |= 1480 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 1481 else 1482 rxdesc->status |= 1483 cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1484 } 1485 1486 /* Restart Rx engine if stopped. */ 1487 /* If we don't need to check status, don't. 
-KDU */ 1488 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { 1489 /* fix the values for the next receiving if RDE is set */ 1490 if (intr_status & EESR_RDE) 1491 mdp->cur_rx = mdp->dirty_rx = 1492 (sh_eth_read(ndev, RDFAR) - 1493 sh_eth_read(ndev, RDLAR)) >> 4; 1494 sh_eth_write(ndev, EDRRR_R, EDRRR); 1495 } 1496 1497 return 0; 1498 } 1499 1500 static void sh_eth_rcv_snd_disable(struct net_device *ndev) 1501 { 1502 /* disable tx and rx */ 1503 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & 1504 ~(ECMR_RE | ECMR_TE), ECMR); 1505 } 1506 1507 static void sh_eth_rcv_snd_enable(struct net_device *ndev) 1508 { 1509 /* enable tx and rx */ 1510 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | 1511 (ECMR_RE | ECMR_TE), ECMR); 1512 } 1513 1514 /* error control function */ 1515 static void sh_eth_error(struct net_device *ndev, int intr_status) 1516 { 1517 struct sh_eth_private *mdp = netdev_priv(ndev); 1518 u32 felic_stat; 1519 u32 link_stat; 1520 u32 mask; 1521 1522 if (intr_status & EESR_ECI) { 1523 felic_stat = sh_eth_read(ndev, ECSR); 1524 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ 1525 if (felic_stat & ECSR_ICD) 1526 ndev->stats.tx_carrier_errors++; 1527 if (felic_stat & ECSR_LCHNG) { 1528 /* Link Changed */ 1529 if (mdp->cd->no_psr || mdp->no_ether_link) { 1530 goto ignore_link; 1531 } else { 1532 link_stat = (sh_eth_read(ndev, PSR)); 1533 if (mdp->ether_link_active_low) 1534 link_stat = ~link_stat; 1535 } 1536 if (!(link_stat & PHY_ST_LINK)) 1537 sh_eth_rcv_snd_disable(ndev); 1538 else { 1539 /* Link Up */ 1540 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) & 1541 ~DMAC_M_ECI, EESIPR); 1542 /*clear int */ 1543 sh_eth_write(ndev, sh_eth_read(ndev, ECSR), 1544 ECSR); 1545 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) | 1546 DMAC_M_ECI, EESIPR); 1547 /* enable tx and rx */ 1548 sh_eth_rcv_snd_enable(ndev); 1549 } 1550 } 1551 } 1552 1553 ignore_link: 1554 if (intr_status & EESR_TWB) { 1555 /* Unused write back interrupt */ 1556 if (intr_status & EESR_TABT) { /* Transmit Abort int */ 1557 ndev->stats.tx_aborted_errors++; 1558 if (netif_msg_tx_err(mdp)) 1559 dev_err(&ndev->dev, "Transmit Abort\n"); 1560 } 1561 } 1562 1563 if (intr_status & EESR_RABT) { 1564 /* Receive Abort int */ 1565 if (intr_status & EESR_RFRMER) { 1566 /* Receive Frame Overflow int */ 1567 ndev->stats.rx_frame_errors++; 1568 if (netif_msg_rx_err(mdp)) 1569 dev_err(&ndev->dev, "Receive Abort\n"); 1570 } 1571 } 1572 1573 if (intr_status & EESR_TDE) { 1574 /* Transmit Descriptor Empty int */ 1575 ndev->stats.tx_fifo_errors++; 1576 if (netif_msg_tx_err(mdp)) 1577 dev_err(&ndev->dev, "Transmit Descriptor Empty\n"); 1578 } 1579 1580 if (intr_status & EESR_TFE) { 1581 /* FIFO under flow */ 1582 ndev->stats.tx_fifo_errors++; 1583 if (netif_msg_tx_err(mdp)) 1584 dev_err(&ndev->dev, "Transmit FIFO Under flow\n"); 1585 } 1586 1587 if (intr_status & EESR_RDE) { 1588 /* Receive Descriptor Empty int */ 1589 ndev->stats.rx_over_errors++; 1590 1591 if (netif_msg_rx_err(mdp)) 1592 dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 1593 } 1594 1595 if (intr_status & EESR_RFE) { 1596 /* Receive FIFO Overflow int */ 1597 ndev->stats.rx_fifo_errors++; 1598 if (netif_msg_rx_err(mdp)) 1599 dev_err(&ndev->dev, "Receive FIFO Overflow\n"); 1600 } 1601 1602 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { 1603 /* Address Error */ 1604 ndev->stats.tx_fifo_errors++; 1605 if (netif_msg_tx_err(mdp)) 1606 dev_err(&ndev->dev, "Address Error\n"); 1607 } 1608 1609 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; 1610 if (mdp->cd->no_ade) 1611 mask 
&= ~EESR_ADE; 1612 if (intr_status & mask) { 1613 /* Tx error */ 1614 u32 edtrr = sh_eth_read(ndev, EDTRR); 1615 /* dmesg */ 1616 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ", 1617 intr_status, mdp->cur_tx); 1618 dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", 1619 mdp->dirty_tx, (u32) ndev->state, edtrr); 1620 /* dirty buffer free */ 1621 sh_eth_txfree(ndev); 1622 1623 /* SH7712 BUG */ 1624 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { 1625 /* tx dma start */ 1626 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); 1627 } 1628 /* wakeup */ 1629 netif_wake_queue(ndev); 1630 } 1631 } 1632 1633 static irqreturn_t sh_eth_interrupt(int irq, void *netdev) 1634 { 1635 struct net_device *ndev = netdev; 1636 struct sh_eth_private *mdp = netdev_priv(ndev); 1637 struct sh_eth_cpu_data *cd = mdp->cd; 1638 irqreturn_t ret = IRQ_NONE; 1639 unsigned long intr_status; 1640 1641 spin_lock(&mdp->lock); 1642 1643 /* Get interrupt status */ 1644 intr_status = sh_eth_read(ndev, EESR); 1645 /* Mask it with the interrupt mask, forcing ECI interrupt to be always 1646 * enabled since it's the one that comes thru regardless of the mask, 1647 * and we need to fully handle it in sh_eth_error() in order to quench 1648 * it as it doesn't get cleared by just writing 1 to the ECI bit... 1649 */ 1650 intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI; 1651 /* Clear interrupt */ 1652 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | 1653 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | 1654 cd->tx_check | cd->eesr_err_check)) { 1655 sh_eth_write(ndev, intr_status, EESR); 1656 ret = IRQ_HANDLED; 1657 } else 1658 goto other_irq; 1659 1660 if (intr_status & (EESR_FRC | /* Frame recv*/ 1661 EESR_RMAF | /* Multi cast address recv*/ 1662 EESR_RRF | /* Bit frame recv */ 1663 EESR_RTLF | /* Long frame recv*/ 1664 EESR_RTSF | /* short frame recv */ 1665 EESR_PRE | /* PHY-LSI recv error */ 1666 EESR_CERF)){ /* recv frame CRC error */ 1667 sh_eth_rx(ndev, intr_status); 1668 } 1669 1670 /* Tx Check */ 1671 if (intr_status & cd->tx_check) { 1672 sh_eth_txfree(ndev); 1673 netif_wake_queue(ndev); 1674 } 1675 1676 if (intr_status & cd->eesr_err_check) 1677 sh_eth_error(ndev, intr_status); 1678 1679 other_irq: 1680 spin_unlock(&mdp->lock); 1681 1682 return ret; 1683 } 1684 1685 /* PHY state control function */ 1686 static void sh_eth_adjust_link(struct net_device *ndev) 1687 { 1688 struct sh_eth_private *mdp = netdev_priv(ndev); 1689 struct phy_device *phydev = mdp->phydev; 1690 int new_state = 0; 1691 1692 if (phydev->link) { 1693 if (phydev->duplex != mdp->duplex) { 1694 new_state = 1; 1695 mdp->duplex = phydev->duplex; 1696 if (mdp->cd->set_duplex) 1697 mdp->cd->set_duplex(ndev); 1698 } 1699 1700 if (phydev->speed != mdp->speed) { 1701 new_state = 1; 1702 mdp->speed = phydev->speed; 1703 if (mdp->cd->set_rate) 1704 mdp->cd->set_rate(ndev); 1705 } 1706 if (!mdp->link) { 1707 sh_eth_write(ndev, 1708 (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); 1709 new_state = 1; 1710 mdp->link = phydev->link; 1711 if (mdp->cd->no_psr || mdp->no_ether_link) 1712 sh_eth_rcv_snd_enable(ndev); 1713 } 1714 } else if (mdp->link) { 1715 new_state = 1; 1716 mdp->link = 0; 1717 mdp->speed = 0; 1718 mdp->duplex = -1; 1719 if (mdp->cd->no_psr || mdp->no_ether_link) 1720 sh_eth_rcv_snd_disable(ndev); 1721 } 1722 1723 if (new_state && netif_msg_link(mdp)) 1724 phy_print_status(phydev); 1725 } 1726 1727 /* PHY init function */ 1728 static int sh_eth_phy_init(struct net_device *ndev) 1729 { 1730 struct sh_eth_private *mdp = 
netdev_priv(ndev); 1731 char phy_id[MII_BUS_ID_SIZE + 3]; 1732 struct phy_device *phydev = NULL; 1733 1734 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 1735 mdp->mii_bus->id , mdp->phy_id); 1736 1737 mdp->link = 0; 1738 mdp->speed = 0; 1739 mdp->duplex = -1; 1740 1741 /* Try connect to PHY */ 1742 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, 1743 mdp->phy_interface); 1744 if (IS_ERR(phydev)) { 1745 dev_err(&ndev->dev, "phy_connect failed\n"); 1746 return PTR_ERR(phydev); 1747 } 1748 1749 dev_info(&ndev->dev, "attached phy %i to driver %s\n", 1750 phydev->addr, phydev->drv->name); 1751 1752 mdp->phydev = phydev; 1753 1754 return 0; 1755 } 1756 1757 /* PHY control start function */ 1758 static int sh_eth_phy_start(struct net_device *ndev) 1759 { 1760 struct sh_eth_private *mdp = netdev_priv(ndev); 1761 int ret; 1762 1763 ret = sh_eth_phy_init(ndev); 1764 if (ret) 1765 return ret; 1766 1767 /* reset phy - this also wakes it from PDOWN */ 1768 phy_write(mdp->phydev, MII_BMCR, BMCR_RESET); 1769 phy_start(mdp->phydev); 1770 1771 return 0; 1772 } 1773 1774 static int sh_eth_get_settings(struct net_device *ndev, 1775 struct ethtool_cmd *ecmd) 1776 { 1777 struct sh_eth_private *mdp = netdev_priv(ndev); 1778 unsigned long flags; 1779 int ret; 1780 1781 spin_lock_irqsave(&mdp->lock, flags); 1782 ret = phy_ethtool_gset(mdp->phydev, ecmd); 1783 spin_unlock_irqrestore(&mdp->lock, flags); 1784 1785 return ret; 1786 } 1787 1788 static int sh_eth_set_settings(struct net_device *ndev, 1789 struct ethtool_cmd *ecmd) 1790 { 1791 struct sh_eth_private *mdp = netdev_priv(ndev); 1792 unsigned long flags; 1793 int ret; 1794 1795 spin_lock_irqsave(&mdp->lock, flags); 1796 1797 /* disable tx and rx */ 1798 sh_eth_rcv_snd_disable(ndev); 1799 1800 ret = phy_ethtool_sset(mdp->phydev, ecmd); 1801 if (ret) 1802 goto error_exit; 1803 1804 if (ecmd->duplex == DUPLEX_FULL) 1805 mdp->duplex = 1; 1806 else 1807 mdp->duplex = 0; 1808 1809 if (mdp->cd->set_duplex) 1810 mdp->cd->set_duplex(ndev); 1811 1812 error_exit: 1813 mdelay(1); 1814 1815 /* enable tx and rx */ 1816 sh_eth_rcv_snd_enable(ndev); 1817 1818 spin_unlock_irqrestore(&mdp->lock, flags); 1819 1820 return ret; 1821 } 1822 1823 static int sh_eth_nway_reset(struct net_device *ndev) 1824 { 1825 struct sh_eth_private *mdp = netdev_priv(ndev); 1826 unsigned long flags; 1827 int ret; 1828 1829 spin_lock_irqsave(&mdp->lock, flags); 1830 ret = phy_start_aneg(mdp->phydev); 1831 spin_unlock_irqrestore(&mdp->lock, flags); 1832 1833 return ret; 1834 } 1835 1836 static u32 sh_eth_get_msglevel(struct net_device *ndev) 1837 { 1838 struct sh_eth_private *mdp = netdev_priv(ndev); 1839 return mdp->msg_enable; 1840 } 1841 1842 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value) 1843 { 1844 struct sh_eth_private *mdp = netdev_priv(ndev); 1845 mdp->msg_enable = value; 1846 } 1847 1848 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = { 1849 "rx_current", "tx_current", 1850 "rx_dirty", "tx_dirty", 1851 }; 1852 #define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats) 1853 1854 static int sh_eth_get_sset_count(struct net_device *netdev, int sset) 1855 { 1856 switch (sset) { 1857 case ETH_SS_STATS: 1858 return SH_ETH_STATS_LEN; 1859 default: 1860 return -EOPNOTSUPP; 1861 } 1862 } 1863 1864 static void sh_eth_get_ethtool_stats(struct net_device *ndev, 1865 struct ethtool_stats *stats, u64 *data) 1866 { 1867 struct sh_eth_private *mdp = netdev_priv(ndev); 1868 int i = 0; 1869 1870 /* device-specific stats */ 1871 data[i++] = mdp->cur_rx; 1872 data[i++] 
= mdp->cur_tx; 1873 data[i++] = mdp->dirty_rx; 1874 data[i++] = mdp->dirty_tx; 1875 } 1876 1877 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) 1878 { 1879 switch (stringset) { 1880 case ETH_SS_STATS: 1881 memcpy(data, *sh_eth_gstrings_stats, 1882 sizeof(sh_eth_gstrings_stats)); 1883 break; 1884 } 1885 } 1886 1887 static void sh_eth_get_ringparam(struct net_device *ndev, 1888 struct ethtool_ringparam *ring) 1889 { 1890 struct sh_eth_private *mdp = netdev_priv(ndev); 1891 1892 ring->rx_max_pending = RX_RING_MAX; 1893 ring->tx_max_pending = TX_RING_MAX; 1894 ring->rx_pending = mdp->num_rx_ring; 1895 ring->tx_pending = mdp->num_tx_ring; 1896 } 1897 1898 static int sh_eth_set_ringparam(struct net_device *ndev, 1899 struct ethtool_ringparam *ring) 1900 { 1901 struct sh_eth_private *mdp = netdev_priv(ndev); 1902 int ret; 1903 1904 if (ring->tx_pending > TX_RING_MAX || 1905 ring->rx_pending > RX_RING_MAX || 1906 ring->tx_pending < TX_RING_MIN || 1907 ring->rx_pending < RX_RING_MIN) 1908 return -EINVAL; 1909 if (ring->rx_mini_pending || ring->rx_jumbo_pending) 1910 return -EINVAL; 1911 1912 if (netif_running(ndev)) { 1913 netif_tx_disable(ndev); 1914 /* Disable interrupts by clearing the interrupt mask. */ 1915 sh_eth_write(ndev, 0x0000, EESIPR); 1916 /* Stop the chip's Tx and Rx processes. */ 1917 sh_eth_write(ndev, 0, EDTRR); 1918 sh_eth_write(ndev, 0, EDRRR); 1919 synchronize_irq(ndev->irq); 1920 } 1921 1922 /* Free all the skbuffs in the Rx queue. */ 1923 sh_eth_ring_free(ndev); 1924 /* Free DMA buffer */ 1925 sh_eth_free_dma_buffer(mdp); 1926 1927 /* Set new parameters */ 1928 mdp->num_rx_ring = ring->rx_pending; 1929 mdp->num_tx_ring = ring->tx_pending; 1930 1931 ret = sh_eth_ring_init(ndev); 1932 if (ret < 0) { 1933 dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__); 1934 return ret; 1935 } 1936 ret = sh_eth_dev_init(ndev, false); 1937 if (ret < 0) { 1938 dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__); 1939 return ret; 1940 } 1941 1942 if (netif_running(ndev)) { 1943 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 1944 /* Setting the Rx mode will start the Rx process. 
*/ 1945 sh_eth_write(ndev, EDRRR_R, EDRRR); 1946 netif_wake_queue(ndev); 1947 } 1948 1949 return 0; 1950 } 1951 1952 static const struct ethtool_ops sh_eth_ethtool_ops = { 1953 .get_settings = sh_eth_get_settings, 1954 .set_settings = sh_eth_set_settings, 1955 .nway_reset = sh_eth_nway_reset, 1956 .get_msglevel = sh_eth_get_msglevel, 1957 .set_msglevel = sh_eth_set_msglevel, 1958 .get_link = ethtool_op_get_link, 1959 .get_strings = sh_eth_get_strings, 1960 .get_ethtool_stats = sh_eth_get_ethtool_stats, 1961 .get_sset_count = sh_eth_get_sset_count, 1962 .get_ringparam = sh_eth_get_ringparam, 1963 .set_ringparam = sh_eth_set_ringparam, 1964 }; 1965 1966 /* network device open function */ 1967 static int sh_eth_open(struct net_device *ndev) 1968 { 1969 int ret = 0; 1970 struct sh_eth_private *mdp = netdev_priv(ndev); 1971 1972 pm_runtime_get_sync(&mdp->pdev->dev); 1973 1974 ret = request_irq(ndev->irq, sh_eth_interrupt, 1975 #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 1976 defined(CONFIG_CPU_SUBTYPE_SH7764) || \ 1977 defined(CONFIG_CPU_SUBTYPE_SH7757) 1978 IRQF_SHARED, 1979 #else 1980 0, 1981 #endif 1982 ndev->name, ndev); 1983 if (ret) { 1984 dev_err(&ndev->dev, "Can not assign IRQ number\n"); 1985 return ret; 1986 } 1987 1988 /* Descriptor set */ 1989 ret = sh_eth_ring_init(ndev); 1990 if (ret) 1991 goto out_free_irq; 1992 1993 /* device init */ 1994 ret = sh_eth_dev_init(ndev, true); 1995 if (ret) 1996 goto out_free_irq; 1997 1998 /* PHY control start*/ 1999 ret = sh_eth_phy_start(ndev); 2000 if (ret) 2001 goto out_free_irq; 2002 2003 return ret; 2004 2005 out_free_irq: 2006 free_irq(ndev->irq, ndev); 2007 pm_runtime_put_sync(&mdp->pdev->dev); 2008 return ret; 2009 } 2010 2011 /* Timeout function */ 2012 static void sh_eth_tx_timeout(struct net_device *ndev) 2013 { 2014 struct sh_eth_private *mdp = netdev_priv(ndev); 2015 struct sh_eth_rxdesc *rxdesc; 2016 int i; 2017 2018 netif_stop_queue(ndev); 2019 2020 if (netif_msg_timer(mdp)) 2021 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x," 2022 " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR)); 2023 2024 /* tx_errors count up */ 2025 ndev->stats.tx_errors++; 2026 2027 /* Free all the skbuffs in the Rx queue. */ 2028 for (i = 0; i < mdp->num_rx_ring; i++) { 2029 rxdesc = &mdp->rx_ring[i]; 2030 rxdesc->status = 0; 2031 rxdesc->addr = 0xBADF00D0; 2032 if (mdp->rx_skbuff[i]) 2033 dev_kfree_skb(mdp->rx_skbuff[i]); 2034 mdp->rx_skbuff[i] = NULL; 2035 } 2036 for (i = 0; i < mdp->num_tx_ring; i++) { 2037 if (mdp->tx_skbuff[i]) 2038 dev_kfree_skb(mdp->tx_skbuff[i]); 2039 mdp->tx_skbuff[i] = NULL; 2040 } 2041 2042 /* device init */ 2043 sh_eth_dev_init(ndev, true); 2044 } 2045 2046 /* Packet transmit function */ 2047 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) 2048 { 2049 struct sh_eth_private *mdp = netdev_priv(ndev); 2050 struct sh_eth_txdesc *txdesc; 2051 u32 entry; 2052 unsigned long flags; 2053 2054 spin_lock_irqsave(&mdp->lock, flags); 2055 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { 2056 if (!sh_eth_txfree(ndev)) { 2057 if (netif_msg_tx_queued(mdp)) 2058 dev_warn(&ndev->dev, "TxFD exhausted.\n"); 2059 netif_stop_queue(ndev); 2060 spin_unlock_irqrestore(&mdp->lock, flags); 2061 return NETDEV_TX_BUSY; 2062 } 2063 } 2064 spin_unlock_irqrestore(&mdp->lock, flags); 2065 2066 entry = mdp->cur_tx % mdp->num_tx_ring; 2067 mdp->tx_skbuff[entry] = skb; 2068 txdesc = &mdp->tx_ring[entry]; 2069 /* soft swap. 
*/ 2070 if (!mdp->cd->hw_swap) 2071 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), 2072 skb->len + 2); 2073 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, 2074 DMA_TO_DEVICE); 2075 if (skb->len < ETHERSMALL) 2076 txdesc->buffer_length = ETHERSMALL; 2077 else 2078 txdesc->buffer_length = skb->len; 2079 2080 if (entry >= mdp->num_tx_ring - 1) 2081 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 2082 else 2083 txdesc->status |= cpu_to_edmac(mdp, TD_TACT); 2084 2085 mdp->cur_tx++; 2086 2087 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp))) 2088 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); 2089 2090 return NETDEV_TX_OK; 2091 } 2092 2093 /* device close function */ 2094 static int sh_eth_close(struct net_device *ndev) 2095 { 2096 struct sh_eth_private *mdp = netdev_priv(ndev); 2097 2098 netif_stop_queue(ndev); 2099 2100 /* Disable interrupts by clearing the interrupt mask. */ 2101 sh_eth_write(ndev, 0x0000, EESIPR); 2102 2103 /* Stop the chip's Tx and Rx processes. */ 2104 sh_eth_write(ndev, 0, EDTRR); 2105 sh_eth_write(ndev, 0, EDRRR); 2106 2107 /* PHY Disconnect */ 2108 if (mdp->phydev) { 2109 phy_stop(mdp->phydev); 2110 phy_disconnect(mdp->phydev); 2111 } 2112 2113 free_irq(ndev->irq, ndev); 2114 2115 /* Free all the skbuffs in the Rx queue. */ 2116 sh_eth_ring_free(ndev); 2117 2118 /* free DMA buffer */ 2119 sh_eth_free_dma_buffer(mdp); 2120 2121 pm_runtime_put_sync(&mdp->pdev->dev); 2122 2123 return 0; 2124 } 2125 2126 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) 2127 { 2128 struct sh_eth_private *mdp = netdev_priv(ndev); 2129 2130 pm_runtime_get_sync(&mdp->pdev->dev); 2131 2132 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR); 2133 sh_eth_write(ndev, 0, TROCR); /* (write clear) */ 2134 ndev->stats.collisions += sh_eth_read(ndev, CDCR); 2135 sh_eth_write(ndev, 0, CDCR); /* (write clear) */ 2136 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); 2137 sh_eth_write(ndev, 0, LCCR); /* (write clear) */ 2138 if (sh_eth_is_gether(mdp)) { 2139 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); 2140 sh_eth_write(ndev, 0, CERCR); /* (write clear) */ 2141 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); 2142 sh_eth_write(ndev, 0, CEECR); /* (write clear) */ 2143 } else { 2144 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); 2145 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ 2146 } 2147 pm_runtime_put_sync(&mdp->pdev->dev); 2148 2149 return &ndev->stats; 2150 } 2151 2152 /* ioctl to device function */ 2153 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, 2154 int cmd) 2155 { 2156 struct sh_eth_private *mdp = netdev_priv(ndev); 2157 struct phy_device *phydev = mdp->phydev; 2158 2159 if (!netif_running(ndev)) 2160 return -EINVAL; 2161 2162 if (!phydev) 2163 return -ENODEV; 2164 2165 return phy_mii_ioctl(phydev, rq, cmd); 2166 } 2167 2168 #if defined(SH_ETH_HAS_TSU) 2169 /* For TSU_POSTn. 
Please refer to the manual about this (strange) bitfields */ 2170 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, 2171 int entry) 2172 { 2173 return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4); 2174 } 2175 2176 static u32 sh_eth_tsu_get_post_mask(int entry) 2177 { 2178 return 0x0f << (28 - ((entry % 8) * 4)); 2179 } 2180 2181 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry) 2182 { 2183 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); 2184 } 2185 2186 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev, 2187 int entry) 2188 { 2189 struct sh_eth_private *mdp = netdev_priv(ndev); 2190 u32 tmp; 2191 void *reg_offset; 2192 2193 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); 2194 tmp = ioread32(reg_offset); 2195 iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset); 2196 } 2197 2198 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev, 2199 int entry) 2200 { 2201 struct sh_eth_private *mdp = netdev_priv(ndev); 2202 u32 post_mask, ref_mask, tmp; 2203 void *reg_offset; 2204 2205 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); 2206 post_mask = sh_eth_tsu_get_post_mask(entry); 2207 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; 2208 2209 tmp = ioread32(reg_offset); 2210 iowrite32(tmp & ~post_mask, reg_offset); 2211 2212 /* If other port enables, the function returns "true" */ 2213 return tmp & ref_mask; 2214 } 2215 2216 static int sh_eth_tsu_busy(struct net_device *ndev) 2217 { 2218 int timeout = SH_ETH_TSU_TIMEOUT_MS * 100; 2219 struct sh_eth_private *mdp = netdev_priv(ndev); 2220 2221 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) { 2222 udelay(10); 2223 timeout--; 2224 if (timeout <= 0) { 2225 dev_err(&ndev->dev, "%s: timeout\n", __func__); 2226 return -ETIMEDOUT; 2227 } 2228 } 2229 2230 return 0; 2231 } 2232 2233 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg, 2234 const u8 *addr) 2235 { 2236 u32 val; 2237 2238 val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3]; 2239 iowrite32(val, reg); 2240 if (sh_eth_tsu_busy(ndev) < 0) 2241 return -EBUSY; 2242 2243 val = addr[4] << 8 | addr[5]; 2244 iowrite32(val, reg + 4); 2245 if (sh_eth_tsu_busy(ndev) < 0) 2246 return -EBUSY; 2247 2248 return 0; 2249 } 2250 2251 static void sh_eth_tsu_read_entry(void *reg, u8 *addr) 2252 { 2253 u32 val; 2254 2255 val = ioread32(reg); 2256 addr[0] = (val >> 24) & 0xff; 2257 addr[1] = (val >> 16) & 0xff; 2258 addr[2] = (val >> 8) & 0xff; 2259 addr[3] = val & 0xff; 2260 val = ioread32(reg + 4); 2261 addr[4] = (val >> 8) & 0xff; 2262 addr[5] = val & 0xff; 2263 } 2264 2265 2266 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr) 2267 { 2268 struct sh_eth_private *mdp = netdev_priv(ndev); 2269 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 2270 int i; 2271 u8 c_addr[ETH_ALEN]; 2272 2273 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { 2274 sh_eth_tsu_read_entry(reg_offset, c_addr); 2275 if (memcmp(addr, c_addr, ETH_ALEN) == 0) 2276 return i; 2277 } 2278 2279 return -ENOENT; 2280 } 2281 2282 static int sh_eth_tsu_find_empty(struct net_device *ndev) 2283 { 2284 u8 blank[ETH_ALEN]; 2285 int entry; 2286 2287 memset(blank, 0, sizeof(blank)); 2288 entry = sh_eth_tsu_find_entry(ndev, blank); 2289 return (entry < 0) ? 
-ENOMEM : entry;
2290 }
2291
2292 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2293 int entry)
2294 {
2295 struct sh_eth_private *mdp = netdev_priv(ndev);
2296 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2297 int ret;
2298 u8 blank[ETH_ALEN];
2299
2300 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2301 ~(1 << (31 - entry)), TSU_TEN);
2302
2303 memset(blank, 0, sizeof(blank));
2304 ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2305 if (ret < 0)
2306 return ret;
2307 return 0;
2308 }
2309
2310 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2311 {
2312 struct sh_eth_private *mdp = netdev_priv(ndev);
2313 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2314 int i, ret;
2315
2316 if (!mdp->cd->tsu)
2317 return 0;
2318
2319 i = sh_eth_tsu_find_entry(ndev, addr);
2320 if (i < 0) {
2321 /* No entry found, create one */
2322 i = sh_eth_tsu_find_empty(ndev);
2323 if (i < 0)
2324 return -ENOMEM;
2325 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2326 if (ret < 0)
2327 return ret;
2328
2329 /* Enable the entry */
2330 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2331 (1 << (31 - i)), TSU_TEN);
2332 }
2333
2334 /* Entry found or created, enable POST */
2335 sh_eth_tsu_enable_cam_entry_post(ndev, i);
2336
2337 return 0;
2338 }
2339
2340 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2341 {
2342 struct sh_eth_private *mdp = netdev_priv(ndev);
2343 int i, ret;
2344
2345 if (!mdp->cd->tsu)
2346 return 0;
2347
2348 i = sh_eth_tsu_find_entry(ndev, addr);
2349 if (i >= 0) {
2350 /* Entry found */
2351 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2352 goto done;
2353
2354 /* Disable the entry if both ports are now disabled */
2355 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2356 if (ret < 0)
2357 return ret;
2358 }
2359 done:
2360 return 0;
2361 }
2362
2363 static int sh_eth_tsu_purge_all(struct net_device *ndev)
2364 {
2365 struct sh_eth_private *mdp = netdev_priv(ndev);
2366 int i, ret;
2367
2368 if (unlikely(!mdp->cd->tsu))
2369 return 0;
2370
2371 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2372 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2373 continue;
2374
2375 /* Disable the entry if both ports are now disabled */
2376 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2377 if (ret < 0)
2378 return ret;
2379 }
2380
2381 return 0;
2382 }
2383
2384 static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2385 {
2386 struct sh_eth_private *mdp = netdev_priv(ndev);
2387 u8 addr[ETH_ALEN];
2388 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2389 int i;
2390
2391 if (unlikely(!mdp->cd->tsu))
2392 return;
2393
2394 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2395 sh_eth_tsu_read_entry(reg_offset, addr);
2396 if (is_multicast_ether_addr(addr))
2397 sh_eth_tsu_del_entry(ndev, addr);
2398 }
2399 }
2400
2401 /* Set up multicast reception */
2402 static void sh_eth_set_multicast_list(struct net_device *ndev)
2403 {
2404 struct sh_eth_private *mdp = netdev_priv(ndev);
2405 u32 ecmr_bits;
2406 int mcast_all = 0;
2407 unsigned long flags;
2408
2409 spin_lock_irqsave(&mdp->lock, flags);
2410 /*
2411 * Initial condition is MCT = 1, PRM = 0.
2412 * Depending on ndev->flags, set PRM or clear MCT
2413 */
2414 ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
2415
2416 if (!(ndev->flags & IFF_MULTICAST)) {
2417 sh_eth_tsu_purge_mcast(ndev);
2418 mcast_all = 1;
2419 }
2420 if (ndev->flags & IFF_ALLMULTI) {
2421 sh_eth_tsu_purge_mcast(ndev);
2422 ecmr_bits &= ~ECMR_MCT;
2423 mcast_all = 1;
2424 }
2425
2426 if (ndev->flags & IFF_PROMISC) {
2427 sh_eth_tsu_purge_all(ndev);
2428 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2429 } else if (mdp->cd->tsu) {
2430 struct netdev_hw_addr *ha;
2431 netdev_for_each_mc_addr(ha, ndev) {
2432 if (mcast_all && is_multicast_ether_addr(ha->addr))
2433 continue;
2434
2435 if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2436 if (!mcast_all) {
2437 sh_eth_tsu_purge_mcast(ndev);
2438 ecmr_bits &= ~ECMR_MCT;
2439 mcast_all = 1;
2440 }
2441 }
2442 }
2443 } else {
2444 /* Normal, unicast/broadcast-only mode. */
2445 ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
2446 }
2447
2448 /* update the ethernet mode */
2449 sh_eth_write(ndev, ecmr_bits, ECMR);
2450
2451 spin_unlock_irqrestore(&mdp->lock, flags);
2452 }
2453
2454 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2455 {
2456 if (!mdp->port)
2457 return TSU_VTAG0;
2458 else
2459 return TSU_VTAG1;
2460 }
2461
2462 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2463 __be16 proto, u16 vid)
2464 {
2465 struct sh_eth_private *mdp = netdev_priv(ndev);
2466 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2467
2468 if (unlikely(!mdp->cd->tsu))
2469 return -EPERM;
2470
2471 /* No filtering if vid = 0 */
2472 if (!vid)
2473 return 0;
2474
2475 mdp->vlan_num_ids++;
2476
2477 /*
2478 * The controller has one VLAN tag HW filter. So, if the filter is
2479 * already enabled, the driver disables it and all VLAN-tagged frames are then received without filtering.
2480 */
2481 if (mdp->vlan_num_ids > 1) {
2482 /* disable VLAN filter */
2483 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2484 return 0;
2485 }
2486
2487 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2488 vtag_reg_index);
2489
2490 return 0;
2491 }
2492
2493 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2494 __be16 proto, u16 vid)
2495 {
2496 struct sh_eth_private *mdp = netdev_priv(ndev);
2497 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2498
2499 if (unlikely(!mdp->cd->tsu))
2500 return -EPERM;
2501
2502 /* No filtering if vid = 0 */
2503 if (!vid)
2504 return 0;
2505
2506 mdp->vlan_num_ids--;
2507 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2508
2509 return 0;
2510 }
2511 #endif /* SH_ETH_HAS_TSU */
2512
2513 /* SuperH's TSU register init function */
2514 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2515 {
2516 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
2517 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
2518 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
2519 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2520 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2521 sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2522 sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2523 sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2524 sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2525 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2526 if (sh_eth_is_gether(mdp)) {
2527 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
2528 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
2529 } else {
2530 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
2531 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
2532 }
2533
sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ 2534 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ 2535 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ 2536 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ 2537 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ 2538 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ 2539 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ 2540 } 2541 2542 /* MDIO bus release function */ 2543 static int sh_mdio_release(struct net_device *ndev) 2544 { 2545 struct mii_bus *bus = dev_get_drvdata(&ndev->dev); 2546 2547 /* unregister mdio bus */ 2548 mdiobus_unregister(bus); 2549 2550 /* remove mdio bus info from net_device */ 2551 dev_set_drvdata(&ndev->dev, NULL); 2552 2553 /* free bitbang info */ 2554 free_mdio_bitbang(bus); 2555 2556 return 0; 2557 } 2558 2559 /* MDIO bus init function */ 2560 static int sh_mdio_init(struct net_device *ndev, int id, 2561 struct sh_eth_plat_data *pd) 2562 { 2563 int ret, i; 2564 struct bb_info *bitbang; 2565 struct sh_eth_private *mdp = netdev_priv(ndev); 2566 2567 /* create bit control struct for PHY */ 2568 bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info), 2569 GFP_KERNEL); 2570 if (!bitbang) { 2571 ret = -ENOMEM; 2572 goto out; 2573 } 2574 2575 /* bitbang init */ 2576 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; 2577 bitbang->set_gate = pd->set_mdio_gate; 2578 bitbang->mdi_msk = PIR_MDI; 2579 bitbang->mdo_msk = PIR_MDO; 2580 bitbang->mmd_msk = PIR_MMD; 2581 bitbang->mdc_msk = PIR_MDC; 2582 bitbang->ctrl.ops = &bb_ops; 2583 2584 /* MII controller setting */ 2585 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); 2586 if (!mdp->mii_bus) { 2587 ret = -ENOMEM; 2588 goto out; 2589 } 2590 2591 /* Hook up MII support for ethtool */ 2592 mdp->mii_bus->name = "sh_mii"; 2593 mdp->mii_bus->parent = &ndev->dev; 2594 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2595 mdp->pdev->name, id); 2596 2597 /* PHY IRQ */ 2598 mdp->mii_bus->irq = devm_kzalloc(&ndev->dev, 2599 sizeof(int) * PHY_MAX_ADDR, 2600 GFP_KERNEL); 2601 if (!mdp->mii_bus->irq) { 2602 ret = -ENOMEM; 2603 goto out_free_bus; 2604 } 2605 2606 for (i = 0; i < PHY_MAX_ADDR; i++) 2607 mdp->mii_bus->irq[i] = PHY_POLL; 2608 2609 /* register mdio bus */ 2610 ret = mdiobus_register(mdp->mii_bus); 2611 if (ret) 2612 goto out_free_bus; 2613 2614 dev_set_drvdata(&ndev->dev, mdp->mii_bus); 2615 2616 return 0; 2617 2618 out_free_bus: 2619 free_mdio_bitbang(mdp->mii_bus); 2620 2621 out: 2622 return ret; 2623 } 2624 2625 static const u16 *sh_eth_get_register_offset(int register_type) 2626 { 2627 const u16 *reg_offset = NULL; 2628 2629 switch (register_type) { 2630 case SH_ETH_REG_GIGABIT: 2631 reg_offset = sh_eth_offset_gigabit; 2632 break; 2633 case SH_ETH_REG_FAST_RCAR: 2634 reg_offset = sh_eth_offset_fast_rcar; 2635 break; 2636 case SH_ETH_REG_FAST_SH4: 2637 reg_offset = sh_eth_offset_fast_sh4; 2638 break; 2639 case SH_ETH_REG_FAST_SH3_SH2: 2640 reg_offset = sh_eth_offset_fast_sh3_sh2; 2641 break; 2642 default: 2643 pr_err("Unknown register type (%d)\n", register_type); 2644 break; 2645 } 2646 2647 return reg_offset; 2648 } 2649 2650 static const struct net_device_ops sh_eth_netdev_ops = { 2651 .ndo_open = sh_eth_open, 2652 .ndo_stop = sh_eth_close, 2653 .ndo_start_xmit = sh_eth_start_xmit, 2654 .ndo_get_stats = sh_eth_get_stats, 2655 #if defined(SH_ETH_HAS_TSU) 2656 .ndo_set_rx_mode = sh_eth_set_multicast_list, 2657 .ndo_vlan_rx_add_vid = 
sh_eth_vlan_rx_add_vid, 2658 .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, 2659 #endif 2660 .ndo_tx_timeout = sh_eth_tx_timeout, 2661 .ndo_do_ioctl = sh_eth_do_ioctl, 2662 .ndo_validate_addr = eth_validate_addr, 2663 .ndo_set_mac_address = eth_mac_addr, 2664 .ndo_change_mtu = eth_change_mtu, 2665 }; 2666 2667 static int sh_eth_drv_probe(struct platform_device *pdev) 2668 { 2669 int ret, devno = 0; 2670 struct resource *res; 2671 struct net_device *ndev = NULL; 2672 struct sh_eth_private *mdp = NULL; 2673 struct sh_eth_plat_data *pd = pdev->dev.platform_data; 2674 2675 /* get base addr */ 2676 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2677 if (unlikely(res == NULL)) { 2678 dev_err(&pdev->dev, "invalid resource\n"); 2679 ret = -EINVAL; 2680 goto out; 2681 } 2682 2683 ndev = alloc_etherdev(sizeof(struct sh_eth_private)); 2684 if (!ndev) { 2685 ret = -ENOMEM; 2686 goto out; 2687 } 2688 2689 /* The sh Ether-specific entries in the device structure. */ 2690 ndev->base_addr = res->start; 2691 devno = pdev->id; 2692 if (devno < 0) 2693 devno = 0; 2694 2695 ndev->dma = -1; 2696 ret = platform_get_irq(pdev, 0); 2697 if (ret < 0) { 2698 ret = -ENODEV; 2699 goto out_release; 2700 } 2701 ndev->irq = ret; 2702 2703 SET_NETDEV_DEV(ndev, &pdev->dev); 2704 2705 /* Fill in the fields of the device structure with ethernet values. */ 2706 ether_setup(ndev); 2707 2708 mdp = netdev_priv(ndev); 2709 mdp->num_tx_ring = TX_RING_SIZE; 2710 mdp->num_rx_ring = RX_RING_SIZE; 2711 mdp->addr = devm_ioremap_resource(&pdev->dev, res); 2712 if (IS_ERR(mdp->addr)) { 2713 ret = PTR_ERR(mdp->addr); 2714 goto out_release; 2715 } 2716 2717 spin_lock_init(&mdp->lock); 2718 mdp->pdev = pdev; 2719 pm_runtime_enable(&pdev->dev); 2720 pm_runtime_resume(&pdev->dev); 2721 2722 /* get PHY ID */ 2723 mdp->phy_id = pd->phy; 2724 mdp->phy_interface = pd->phy_interface; 2725 /* EDMAC endian */ 2726 mdp->edmac_endian = pd->edmac_endian; 2727 mdp->no_ether_link = pd->no_ether_link; 2728 mdp->ether_link_active_low = pd->ether_link_active_low; 2729 mdp->reg_offset = sh_eth_get_register_offset(pd->register_type); 2730 2731 /* set cpu data */ 2732 #if defined(SH_ETH_HAS_BOTH_MODULES) 2733 mdp->cd = sh_eth_get_cpu_data(mdp); 2734 #else 2735 mdp->cd = &sh_eth_my_cpu_data; 2736 #endif 2737 sh_eth_set_default_cpu_data(mdp->cd); 2738 2739 /* set function */ 2740 ndev->netdev_ops = &sh_eth_netdev_ops; 2741 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); 2742 ndev->watchdog_timeo = TX_TIMEOUT; 2743 2744 /* debug message level */ 2745 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; 2746 2747 /* read and set MAC address */ 2748 read_mac_address(ndev, pd->mac_addr); 2749 if (!is_valid_ether_addr(ndev->dev_addr)) { 2750 dev_warn(&pdev->dev, 2751 "no valid MAC address supplied, using a random one.\n"); 2752 eth_hw_addr_random(ndev); 2753 } 2754 2755 /* ioremap the TSU registers */ 2756 if (mdp->cd->tsu) { 2757 struct resource *rtsu; 2758 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2759 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); 2760 if (IS_ERR(mdp->tsu_addr)) { 2761 ret = PTR_ERR(mdp->tsu_addr); 2762 goto out_release; 2763 } 2764 mdp->port = devno % 2; 2765 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; 2766 } 2767 2768 /* initialize first or needed device */ 2769 if (!devno || pd->needs_init) { 2770 if (mdp->cd->chip_reset) 2771 mdp->cd->chip_reset(ndev); 2772 2773 if (mdp->cd->tsu) { 2774 /* TSU init (Init only)*/ 2775 sh_eth_tsu_init(mdp); 2776 } 2777 } 2778 2779 /* network device register */ 2780 ret = register_netdev(ndev); 
2781 if (ret) 2782 goto out_release; 2783 2784 /* mdio bus init */ 2785 ret = sh_mdio_init(ndev, pdev->id, pd); 2786 if (ret) 2787 goto out_unregister; 2788 2789 /* print device information */ 2790 pr_info("Base address at 0x%x, %pM, IRQ %d.\n", 2791 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); 2792 2793 platform_set_drvdata(pdev, ndev); 2794 2795 return ret; 2796 2797 out_unregister: 2798 unregister_netdev(ndev); 2799 2800 out_release: 2801 /* net_dev free */ 2802 if (ndev) 2803 free_netdev(ndev); 2804 2805 out: 2806 return ret; 2807 } 2808 2809 static int sh_eth_drv_remove(struct platform_device *pdev) 2810 { 2811 struct net_device *ndev = platform_get_drvdata(pdev); 2812 2813 sh_mdio_release(ndev); 2814 unregister_netdev(ndev); 2815 pm_runtime_disable(&pdev->dev); 2816 free_netdev(ndev); 2817 platform_set_drvdata(pdev, NULL); 2818 2819 return 0; 2820 } 2821 2822 static int sh_eth_runtime_nop(struct device *dev) 2823 { 2824 /* 2825 * Runtime PM callback shared between ->runtime_suspend() 2826 * and ->runtime_resume(). Simply returns success. 2827 * 2828 * This driver re-initializes all registers after 2829 * pm_runtime_get_sync() anyway so there is no need 2830 * to save and restore registers here. 2831 */ 2832 return 0; 2833 } 2834 2835 static struct dev_pm_ops sh_eth_dev_pm_ops = { 2836 .runtime_suspend = sh_eth_runtime_nop, 2837 .runtime_resume = sh_eth_runtime_nop, 2838 }; 2839 2840 static struct platform_driver sh_eth_driver = { 2841 .probe = sh_eth_drv_probe, 2842 .remove = sh_eth_drv_remove, 2843 .driver = { 2844 .name = CARDNAME, 2845 .pm = &sh_eth_dev_pm_ops, 2846 }, 2847 }; 2848 2849 module_platform_driver(sh_eth_driver); 2850 2851 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda"); 2852 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver"); 2853 MODULE_LICENSE("GPL v2"); 2854
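/*
 * Illustrative sketch only, not part of the driver: sh_eth_drv_probe() above
 * expects board code to register a platform device whose name matches
 * CARDNAME and whose dev.platform_data points to a struct sh_eth_plat_data.
 * The field values below are board-specific assumptions; .phy is the PHY
 * address on the MDIO bus and .register_type selects one of the register
 * offset tables defined at the top of this file.
 *
 *	static struct sh_eth_plat_data example_sh_eth_pdata = {
 *		.phy		= 0x01,
 *		.phy_interface	= PHY_INTERFACE_MODE_MII,
 *		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 *		.register_type	= SH_ETH_REG_FAST_SH4,
 *	};
 */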