/* SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2013 Renesas Solutions Corp.
 * Copyright (C) 2013 Cogent Embedded, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
	(NETIF_MSG_LINK | \
	 NETIF_MSG_TIMER | \
	 NETIF_MSG_RX_ERR | \
	 NETIF_MSG_TX_ERR)

static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[PSR]		= 0x0528,
	[PIPR]		= 0x052c,
	[RFLR]		= 0x0508,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]		= 0x05b0,
	[BCULR]		= 0x05b4,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[TROCR]		= 0x0700,
	[CDCR]		= 0x0708,
	[LCCR]		= 0x0710,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[CERCR]		= 0x0768,
	[CEECR]		= 0x0770,
	[MAFCR]		= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};

static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[RFLR]		= 0x0508,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[MAFCR]		= 0x0778,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_VTAG0]	= 0x0058,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008C,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0300,
	[RFLR]		= 0x0308,
	[ECSR]		= 0x0310,
	[ECSIPR]	= 0x0318,
	[PIR]		= 0x0320,
	[PSR]		= 0x0328,
	[RDMLR]		= 0x0340,
	[IPGR]		= 0x0350,
	[APR]		= 0x0354,
	[MPR]		= 0x0358,
	[RFCF]		= 0x0360,
	[TPAUSER]	= 0x0364,
	[TPAUSECR]	= 0x0368,
	[MAHR]		= 0x03c0,
	[MALR]		= 0x03c8,
	[TROCR]		= 0x03d0,
	[CDCR]		= 0x03d4,
	[LCCR]		= 0x03d8,
	[CNDCR]		= 0x03dc,
	[CEFCR]		= 0x03e4,
	[FRECR]		= 0x03e8,
	[TSFRCR]	= 0x03ec,
	[TLFRCR]	= 0x03f0,
	[RFCR]		= 0x03f4,
	[MAFCR]		= 0x03f8,

	[EDMR]		= 0x0200,
	[EDTRR]		= 0x0208,
	[EDRRR]		= 0x0210,
	[TDLAR]		= 0x0218,
	[RDLAR]		= 0x0220,
	[EESR]		= 0x0228,
	[EESIPR]	= 0x0230,
	[TRSCER]	= 0x0238,
	[RMFCR]		= 0x0240,
	[TFTR]		= 0x0248,
	[FDR]		= 0x0250,
	[RMCR]		= 0x0258,
	[TFUCR]		= 0x0264,
	[RFOCR]		= 0x0268,
	[RMIIMODE]	= 0x026c,
	[FCFTR]		= 0x0270,
	[TRIMD]		= 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0100,
	[RFLR]		= 0x0108,
	[ECSR]		= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]		= 0x0120,
	[PSR]		= 0x0128,
	[RDMLR]		= 0x0140,
	[IPGR]		= 0x0150,
	[APR]		= 0x0154,
	[MPR]		= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]		= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]		= 0x016c,
	[MAHR]		= 0x01c0,
	[MALR]		= 0x01c8,
	[TROCR]		= 0x01d0,
	[CDCR]		= 0x01d4,
	[LCCR]		= 0x01d8,
	[CNDCR]		= 0x01dc,
	[CEFCR]		= 0x01e4,
	[FRECR]		= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]		= 0x01f4,
	[MAFCR]		= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0008,
	[EDRRR]		= 0x0010,
	[TDLAR]		= 0x0018,
	[RDLAR]		= 0x0020,
	[EESR]		= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]		= 0x0040,
	[TFTR]		= 0x0048,
	[FDR]		= 0x0050,
	[RMCR]		= 0x0058,
	[TFUCR]		= 0x0064,
	[RFOCR]		= 0x0068,
	[FCFTR]		= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]		= 0x007c,
	[RBWAR]		= 0x00c8,
	[RDFAR]		= 0x00cc,
	[TBRAR]		= 0x00d4,
	[TDFAR]		= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0160,
	[ECSR]		= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]		= 0x016c,
	[MAHR]		= 0x0170,
	[MALR]		= 0x0174,
	[RFLR]		= 0x0178,
	[PSR]		= 0x017c,
	[TROCR]		= 0x0180,
	[CDCR]		= 0x0184,
	[LCCR]		= 0x0188,
	[CNDCR]		= 0x018c,
	[CEFCR]		= 0x0194,
	[FRECR]		= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]		= 0x01a4,
	[MAFCR]		= 0x01a8,
	[IPGR]		= 0x01b4,
	[APR]		= 0x01b8,
	[MPR]		= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]		= 0x01cc,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRL31]	= 0x01fc,
};
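/* Each table above maps the driver's generic register index onto the MMIO
 * offsets of one controller family (GETHER, RZ fast Ether, R-Car fast Ether,
 * SH4 fast Ether, SH3/SH2 fast Ether).  sh_eth_read()/sh_eth_write() go
 * through mdp->reg_offset, which points at the table chosen for the running
 * SoC, and the helpers below tell the GETHER and RZ layouts apart simply by
 * comparing that pointer against the tables.
 */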
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_gigabit;
}

static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_fast_rz;
}

static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		pr_warn("PHY interface mode was not setup. Set to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else /* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

/* There is CPU dependent code */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
	default:
		break;
	}
}

/* R8A7778/9 */
static struct sh_eth_cpu_data r8a777x_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_r8a777x,

	.register_type = SH_ETH_REG_FAST_RCAR,

	.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value = 0x01ff009f,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
};

/* R8A7790/1 */
static struct sh_eth_cpu_data r8a779x_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_r8a777x,

	.register_type = SH_ETH_REG_FAST_RCAR,

	.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value = 0x01ff009f,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
	.rmiimode = 1,
	.shift_rd0 = 1,
};

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_sh7724,

	.register_type = SH_ETH_REG_FAST_SH4,

	.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value = 0x01ff009f,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
	.rpadir = 1,
	.rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_sh7757,

	.register_type = SH_ETH_REG_FAST_SH4,

	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value = RMCR_RNC,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags = IRQF_SHARED,
	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
	.no_ade = 1,
	.rpadir = 1,
	.rpadir_value = 2 << 16,
};

#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.chip_reset = sh_eth_chip_reset_giga,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_giga,

	.register_type = SH_ETH_REG_GIGABIT,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value = 0x0000072f,
	.rmcr_value = RMCR_RNC,

	.irq_flags = IRQF_SHARED,
	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.bculr = 1,
	.hw_swap = 1,
	.rpadir = 1,
	.rpadir_value = 2 << 16,
	.no_trimd = 1,
	.no_ade = 1,
	.tsu = 1,
};

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
	.chip_reset = sh_eth_chip_reset,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_gether,

	.register_type = SH_ETH_REG_GIGABIT,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.bculr = 1,
	.hw_swap = 1,
	.no_trimd = 1,
	.no_ade = 1,
	.tsu = 1,
	.hw_crc = 1,
	.select_mii = 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
	.chip_reset = sh_eth_chip_reset,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_gether,

	.register_type = SH_ETH_REG_GIGABIT,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.bculr = 1,
	.hw_swap = 1,
	.no_trimd = 1,
	.no_ade = 1,
	.tsu = 1,
	.irq_flags = IRQF_SHARED,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);

	sh_eth_select_mii(ndev);
}

/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
	.chip_reset = sh_eth_chip_reset_r8a7740,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_gether,

	.register_type = SH_ETH_REG_GIGABIT,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value = 0x0000070f,
	.rmcr_value = RMCR_RNC,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.bculr = 1,
	.hw_swap = 1,
	.rpadir = 1,
	.rpadir_value = 2 << 16,
	.no_trimd = 1,
	.no_ade = 1,
	.tsu = 1,
	.select_mii = 1,
	.shift_rd0 = 1,
};

/* R7S72100 */
static struct sh_eth_cpu_data r7s72100_data = {
	.chip_reset = sh_eth_chip_reset,
	.set_duplex = sh_eth_set_duplex,

	.register_type = SH_ETH_REG_FAST_RZ,

	.ecsr_value = ECSR_ICD,
	.ecsipr_value = ECSIPR_ICDIP,
	.eesipr_value = 0xff7f009f,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value = 0x0000070f,
	.rmcr_value = RMCR_RNC,

	.no_psr = 1,
	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
	.rpadir = 1,
	.rpadir_value = 2 << 16,
	.no_trimd = 1,
	.no_ade = 1,
	.hw_crc = 1,
	.tsu = 1,
	.shift_rd0 = 1,
};

static struct sh_eth_cpu_data sh7619_data = {
	.register_type = SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
};

static struct sh_eth_cpu_data sh771x_data = {
	.register_type = SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu = 1,
};

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
}

static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		pr_err("Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}

static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
			     EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			goto out;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		if (mdp->cd->hw_crc)
			sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
			     EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
			     EDMR);
	}

out:
	return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif

/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}

/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/* Get MAC address from the SuperH MAC address registers
 *
 * SuperH's Ethernet device has no ROM for the MAC address.
 * This driver picks up the MAC address that was programmed by the
 * bootloader (U-Boot or sh-ipl+g), so the bootloader must set a MAC
 * address before this device can be used.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;	/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};
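/* The MII management interface is bit-banged through a single register:
 * bb_info carries the register address plus the masks for the MDC, MMD
 * (direction), MDO and MDI lines, and the optional set_gate() hook is
 * invoked before every access on parts that gate the pins behind another
 * register.  The mdio-bitbang framework clocks whole MII management frames
 * by calling the four ops above.
 */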
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
			       DMA_FROM_DEVICE);
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The buffer length is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
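	/* For example, with an MTU of 1500 this works out to
	 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 = 1546 bytes per Rx
	 * buffer; an MTU of 1492 or less simply uses PKT_BUF_SZ.
	 */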
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}

static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}

static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		goto out;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ?
		ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	if (start)
		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	if (start) {
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);

		netif_start_queue(ndev);
	}

out:
	return ret;
}

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += txdesc->buffer_length;
	}
	return free_num;
}

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	struct sk_buff *skb;
	int exceeded = 0;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (*quota <= 0) {
			exceeded = 1;
			break;
		}
		(*quota)--;

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		/* In case of almost all GETHER/ETHERs, the Receive Frame State
		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
		 * bit 0. However, in case of the R8A7740, R8A779x, and
		 * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
		 * driver needs right shifting by 16.
		 */
		if (mdp->cd->shift_rd0)
			desc_status >>= 16;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
						mdp->rx_buf_sz,
						DMA_FROM_DEVICE);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_receive_skb(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer length is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
				       DMA_FROM_DEVICE);
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE) {
			u32 count = (sh_eth_read(ndev, RDFAR) -
				     sh_eth_read(ndev, RDLAR)) >> 4;

			mdp->cur_rx = count;
			mdp->dirty_rx = count;
		}
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	return exceeded;
}

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		     ~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		     (ECMR_RE | ECMR_TE), ECMR);
}

/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				goto ignore_link;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK)) {
				sh_eth_rcv_snd_disable(ndev);
			} else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					     ~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					     ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					     DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

ignore_link:
	if (intr_status & EESR_TWB) {
		/* Unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			if (netif_msg_tx_err(mdp))
				dev_err(&ndev->dev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* Transmit FIFO underflow int */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;

		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

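	/* Any of the Tx-side errors below may have stopped the transmitter:
	 * free the descriptors that already completed, rearm EDTRR if the
	 * hardware cleared it (see the SH7712 note below) and wake the queue
	 * so transmission can resume.
	 */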
	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);

		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			intr_status, mdp->cur_tx, mdp->dirty_tx,
			(u32)ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	unsigned long intr_status, intr_enable;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing ECI interrupt to be always
	 * enabled since it's the one that comes thru regardless of the mask,
	 * and we need to fully handle it in sh_eth_error() in order to quench
	 * it as it doesn't get cleared by just writing 1 to the ECI bit...
	 */
	intr_enable = sh_eth_read(ndev, EESIPR);
	intr_status &= intr_enable | DMAC_M_ECI;
	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
		ret = IRQ_HANDLED;
	else
		goto other_irq;

	if (intr_status & EESR_RX_CHECK) {
		if (napi_schedule_prep(&mdp->napi)) {
			/* Mask Rx interrupts */
			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
				     EESIPR);
			__napi_schedule(&mdp->napi);
		} else {
			dev_warn(&ndev->dev,
				 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
				 intr_status, intr_enable);
		}
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		/* Clear Tx interrupts */
		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);

		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check) {
		/* Clear error interrupts */
		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);

		sh_eth_error(ndev, intr_status);
	}

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}

static int sh_eth_poll(struct napi_struct *napi, int budget)
{
	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
						  napi);
	struct net_device *ndev = napi->dev;
	int quota = budget;
	unsigned long intr_status;

	for (;;) {
		intr_status = sh_eth_read(ndev, EESR);
		if (!(intr_status & EESR_RX_CHECK))
			break;
		/* Clear Rx interrupts */
		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);

		if (sh_eth_rx(ndev, intr_status, &quota))
			goto out;
	}

	napi_complete(napi);

	/* Reenable Rx interrupts */
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
	return budget - quota;
}
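/* Note that Rx is NAPI driven: sh_eth_interrupt() masks the EESR_RX_CHECK
 * sources in EESIPR and schedules the poller, while sh_eth_poll() above
 * restores the full eesipr_value only once the Rx work fits within the
 * budget.
 */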
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (!mdp->link) {
			sh_eth_write(ndev,
				     sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
				     ECMR);
			new_state = 1;
			mdp->link = phydev->link;
			if (mdp->cd->no_psr || mdp->no_ether_link)
				sh_eth_rcv_snd_enable(ndev);
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = 0;
		mdp->speed = 0;
		mdp->duplex = -1;
		if (mdp->cd->no_psr || mdp->no_ether_link)
			sh_eth_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}

/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		 mdp->mii_bus->id, mdp->phy_id);

	mdp->link = 0;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
			     mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n",
		 phydev->addr, phydev->irq, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	phy_start(mdp->phydev);

	return 0;
}

static int sh_eth_get_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_set_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
		       sizeof(sh_eth_gstrings_stats));
		break;
	}
}

static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}

static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_tx_disable(ndev);
		/* Disable interrupts by clearing the interrupt mask. */
		sh_eth_write(ndev, 0x0000, EESIPR);
		/* Stop the chip's Tx and Rx processes. */
		sh_eth_write(ndev, 0, EDTRR);
		sh_eth_write(ndev, 0, EDRRR);
		synchronize_irq(ndev->irq);
	}

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);
	/* Free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	ret = sh_eth_ring_init(ndev);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
		return ret;
	}
	ret = sh_eth_dev_init(ndev, false);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
		return ret;
	}

	if (netif_running(ndev)) {
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_wake_queue(ndev);
	}

	return 0;
}

static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings = sh_eth_get_settings,
	.set_settings = sh_eth_set_settings,
	.nway_reset = sh_eth_nway_reset,
	.get_msglevel = sh_eth_get_msglevel,
	.set_msglevel = sh_eth_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = sh_eth_get_strings,
	.get_ethtool_stats = sh_eth_get_ethtool_stats,
	.get_sset_count = sh_eth_get_sset_count,
	.get_ringparam = sh_eth_get_ringparam,
	.set_ringparam = sh_eth_set_ringparam,
};

/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	napi_enable(&mdp->napi);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
			  mdp->cd->irq_flags, ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Can not assign IRQ number\n");
		goto out_napi_off;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev, true);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	napi_disable(&mdp->napi);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}

/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	if (netif_msg_timer(mdp)) {
		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
			ndev->name, (int)sh_eth_read(ndev, EESR));
	}

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev, true);
}

/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}

/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	napi_disable(&mdp->napi);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}

static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (sh_eth_is_rz_fast_ether(mdp))
		return &ndev->stats;

	pm_runtime_get_sync(&mdp->pdev->dev);

	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &ndev->stats;
}

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields. */
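/* As the helpers below encode it, each TSU_POSTn register packs eight CAM
 * entries at four bits per entry (entry 0 sits in the top nibble of
 * TSU_POST1), and within each nibble the bit used as the per-port enable is
 * selected by the port number.
 */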
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
					    int entry)
{
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}

static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}

static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	tmp = ioread32(reg_offset);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 post_mask, ref_mask, tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = ioread32(reg_offset);
	iowrite32(tmp & ~post_mask, reg_offset);

	/* If the other port is enabled, the function returns "true" */
	return tmp & ref_mask;
}

static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			dev_err(&ndev->dev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
{
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}

static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
	u32 val;

	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}

static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (ether_addr_equal(addr, c_addr))
			return i;
	}

	return -ENOENT;
}
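/* An all-zero MAC address marks an unused CAM entry, so an empty slot can be
 * located by searching for a blank address with the lookup helper above.
 */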
static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
}

static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int ret;
	u8 blank[ETH_ALEN];

	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
	if (ret < 0)
		return ret;
	return 0;
}

static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0) {
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		if (i < 0)
			return -ENOMEM;
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
		if (ret < 0)
			return ret;

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);
	}

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);

	return 0;
}

static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i >= 0) {
		/* Entry found */
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			goto done;

		/* Disable the entry if both ports are disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}
done:
	return 0;
}

static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (unlikely(!mdp->cd->tsu))
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports are disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (unlikely(!mdp->cd->tsu))
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}
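/* Receive-mode policy implemented below: promiscuous mode sets ECMR_PRM,
 * all-multicast clears ECMR_MCT, and otherwise each address on the device's
 * multicast list is loaded into a TSU CAM entry.  If a CAM entry cannot be
 * added, the driver falls back to clearing ECMR_MCT.
 */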
/* Set the multicast reception mode */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		mcast_all = 1;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;
		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}

static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	if (!mdp->port)
		return TSU_VTAG0;
	else
		return TSU_VTAG1;
}

static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
				  __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/* The controller has one VLAN tag HW filter. So, if the filter is
	 * already enabled, the driver disables it and accepts all VLAN IDs
	 * instead of filtering.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}

static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
				   __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}
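/* sh_eth_tsu_init() below brings the TSU to a known state: port-to-port
 * forwarding disabled, TSU interrupts disabled, and all CAM entries and
 * their POST bits cleared.  On the RZ fast Ethernet variant (r7s72100)
 * only the CAM enable bits (TSU_TEN) are cleared.
 */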
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	if (sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
		return;
	}

	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}

/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}

/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
			       GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = PIR_MDI;
	bitbang->mdo_msk = PIR_MDO;
	bitbang->mmd_msk = PIR_MMD;
	bitbang->mdc_msk = PIR_MDC;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 mdp->pdev->name, id);

	/* PHY IRQ */
	mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
					 sizeof(int) * PHY_MAX_ADDR,
					 GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;
	if (pd->phy_irq > 0)
		mdp->mii_bus->irq[pd->phy] = pd->phy_irq;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_bus;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out:
	return ret;
}

static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_RZ:
		reg_offset = sh_eth_offset_fast_rz;
		break;
	case SH_ETH_REG_FAST_RCAR:
		reg_offset = sh_eth_offset_fast_rcar;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		pr_err("Unknown register type (%d)\n", register_type);
		break;
	}

	return reg_offset;
}
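/* Two sets of net_device_ops are provided: controllers with a TSU block
 * additionally get .ndo_set_rx_mode and the VLAN filter callbacks.  The
 * probe routine selects between them based on mdp->cd->tsu.
 */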
static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static const struct net_device_ops sh_eth_netdev_ops_tsu = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
	const struct platform_device_id *id = platform_get_device_id(pdev);

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	if (!pd) {
		dev_err(&pdev->dev, "no platform data\n");
		ret = -EINVAL;
		goto out_release;
	}

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;

	/* set cpu data */
	mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}
	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;
		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
		if (IS_ERR(mdp->tsu_addr)) {
			ret = PTR_ERR(mdp->tsu_addr);
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_napi_del:
	netif_napi_del(&mdp->napi);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}
#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif

static struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		.name = CARDNAME,
		.pm = SH_ETH_PM_OPS,
	},
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");